lmnr-0.6.18-py3-none-any.whl → lmnr-0.6.19-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +55 -20
  2. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +23 -0
  3. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +61 -0
  4. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +442 -0
  5. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +1024 -0
  6. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +297 -0
  7. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +16 -0
  8. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +308 -0
  9. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
  10. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
  11. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +68 -0
  12. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +185 -0
  13. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +176 -0
  14. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +358 -0
  15. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +319 -0
  16. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +132 -0
  17. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +626 -0
  18. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +1 -0
  19. lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +1 -3
  20. lmnr/sdk/browser/browser_use_otel.py +1 -1
  21. lmnr/sdk/browser/patchright_otel.py +0 -14
  22. lmnr/sdk/browser/playwright_otel.py +16 -130
  23. lmnr/sdk/browser/pw_utils.py +45 -31
  24. lmnr/version.py +1 -1
  25. {lmnr-0.6.18.dist-info → lmnr-0.6.19.dist-info}/METADATA +2 -5
  26. {lmnr-0.6.18.dist-info → lmnr-0.6.19.dist-info}/RECORD +28 -11
  27. {lmnr-0.6.18.dist-info → lmnr-0.6.19.dist-info}/WHEEL +1 -1
  28. {lmnr-0.6.18.dist-info → lmnr-0.6.19.dist-info}/entry_points.txt +0 -0
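The headline change in 0.6.19 is that the OpenAI instrumentation (files 3-18) is vendored into lmnr itself rather than imported from the external opentelemetry-instrumentation-openai package. A minimal sketch of the user-visible effect, assuming typical lmnr SDK usage (`Laminar.initialize` is the SDK's entry point, not part of this diff):

    from lmnr import Laminar

    # With 0.6.19, OpenAI calls are traced with only the `openai` package
    # installed; the instrumentor ships inside the wheel (files 3-18 above).
    Laminar.initialize(project_api_key="...")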
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py
@@ -0,0 +1,626 @@
+ import json
+ import pydantic
+ import re
+ import time
+
+ from openai import AsyncStream, Stream
+
+ # Conditional imports for backward compatibility
+ try:
+     from openai.types.responses import (
+         FunctionToolParam,
+         Response,
+         ResponseInputItemParam,
+         ResponseInputParam,
+         ResponseOutputItem,
+         ResponseUsage,
+         ToolParam,
+     )
+     from openai.types.responses.response_output_message_param import (
+         ResponseOutputMessageParam,
+     )
+
+     RESPONSES_AVAILABLE = True
+ except ImportError:
+     # Fallback types for older OpenAI SDK versions
+     from typing import Any, Dict, List, Union
+
+     # Create basic fallback types
+     FunctionToolParam = Dict[str, Any]
+     Response = Any
+     ResponseInputItemParam = Dict[str, Any]
+     ResponseInputParam = Union[str, List[Dict[str, Any]]]
+     ResponseOutputItem = Dict[str, Any]
+     ResponseUsage = Dict[str, Any]
+     ToolParam = Dict[str, Any]
+     ResponseOutputMessageParam = Dict[str, Any]
+     RESPONSES_AVAILABLE = False
+
+ from openai._legacy_response import LegacyAPIResponse
+ from opentelemetry import context as context_api
+ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
+ from opentelemetry.semconv_ai import SpanAttributes
+ from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
+ from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
+     GEN_AI_COMPLETION,
+     GEN_AI_PROMPT,
+     GEN_AI_USAGE_INPUT_TOKENS,
+     GEN_AI_USAGE_OUTPUT_TOKENS,
+     GEN_AI_RESPONSE_ID,
+     GEN_AI_REQUEST_MODEL,
+     GEN_AI_RESPONSE_MODEL,
+     GEN_AI_SYSTEM,
+ )
+ from opentelemetry.trace import SpanKind, Span, StatusCode, Tracer
+ from typing import Any, Optional, Union
+ from typing_extensions import NotRequired
+
+ from ..shared import (
+     _set_span_attribute,
+     model_as_dict,
+ )
+
+ from ..utils import (
+     _with_tracer_wrapper,
+     dont_throw,
+     should_send_prompts,
+ )
+
+ SPAN_NAME = "openai.response"
+
+
+ def prepare_input_param(input_param: ResponseInputItemParam) -> ResponseInputItemParam:
+     """
+     Looks like OpenAI API infers the type "message" if the shape is correct,
+     but type is not specified.
+     It is marked as required on the message types. We add this to our
+     traced data to make it work.
+     """
+     try:
+         d = model_as_dict(input_param)
+         if "type" not in d:
+             d["type"] = "message"
+         if RESPONSES_AVAILABLE:
+             return ResponseInputItemParam(**d)
+         else:
+             return d
+     except Exception:
+         return input_param
+
+
+ def process_input(inp: ResponseInputParam) -> ResponseInputParam:
+     if not isinstance(inp, list):
+         return inp
+     return [prepare_input_param(item) for item in inp]
+
+
+ def is_validator_iterator(content):
+     """
+     Some OpenAI objects contain fields typed as Iterable, which pydantic
+     internally converts to a ValidatorIterator, and they cannot be trivially
+     serialized without consuming the iterator to, for example, a list.
+
+     See: https://github.com/pydantic/pydantic/issues/9541#issuecomment-2189045051
+     """
+     return re.search(r"pydantic.*ValidatorIterator'>$", str(type(content)))
+
+
+ # OpenAI API accepts output messages without an ID in its inputs, but
+ # the ID is marked as required in the output type.
+ if RESPONSES_AVAILABLE:
+
+     class ResponseOutputMessageParamWithoutId(ResponseOutputMessageParam):
+         id: NotRequired[str]
+
+ else:
+     # Fallback for older SDK versions
+     ResponseOutputMessageParamWithoutId = dict
+
+
+ class TracedData(pydantic.BaseModel):
+     start_time: float  # time.time_ns()
+     response_id: str
+     # actually Union[str, list[Union[ResponseInputItemParam, ResponseOutputMessageParamWithoutId]]],
+     # but this only works properly in Python 3.10+ / newer pydantic
+     input: Any
+     # system message
+     instructions: Optional[str] = pydantic.Field(default=None)
+     # TODO: remove Any with newer Python / pydantic
+     tools: Optional[list[Union[Any, ToolParam]]] = pydantic.Field(default=None)
+     output_blocks: Optional[dict[str, ResponseOutputItem]] = pydantic.Field(
+         default=None
+     )
+     usage: Optional[ResponseUsage] = pydantic.Field(default=None)
+     output_text: Optional[str] = pydantic.Field(default=None)
+     request_model: Optional[str] = pydantic.Field(default=None)
+     response_model: Optional[str] = pydantic.Field(default=None)
+
+
+ responses: dict[str, TracedData] = {}
+
+
+ def parse_response(response: Union[LegacyAPIResponse, Response]) -> Response:
+     if isinstance(response, LegacyAPIResponse):
+         return response.parse()
+     return response
+
+
+ def get_tools_from_kwargs(kwargs: dict) -> list[ToolParam]:
+     tools_input = kwargs.get("tools", [])
+     tools = []
+
+     for tool in tools_input:
+         if tool.get("type") == "function":
+             if RESPONSES_AVAILABLE:
+                 tools.append(FunctionToolParam(**tool))
+             else:
+                 tools.append(tool)
+
+     return tools
+
+
+ def process_content_block(
+     block: dict[str, Any],
+ ) -> dict[str, Any]:
+     # TODO: keep the original type once backend supports it
+     if block.get("type") in ["text", "input_text", "output_text"]:
+         return {"type": "text", "text": block.get("text")}
+     elif block.get("type") in ["image", "input_image", "output_image"]:
+         return {
+             "type": "image",
+             "image_url": block.get("image_url"),
+             "detail": block.get("detail"),
+             "file_id": block.get("file_id"),
+         }
+     elif block.get("type") in ["file", "input_file", "output_file"]:
+         return {
+             "type": "file",
+             "file_id": block.get("file_id"),
+             "filename": block.get("filename"),
+             "file_data": block.get("file_data"),
+         }
+     return block
+
+
+ @dont_throw
+ def set_data_attributes(traced_response: TracedData, span: Span):
+     _set_span_attribute(span, GEN_AI_SYSTEM, "openai")
+     _set_span_attribute(span, GEN_AI_REQUEST_MODEL, traced_response.request_model)
+     _set_span_attribute(span, GEN_AI_RESPONSE_ID, traced_response.response_id)
+     _set_span_attribute(span, GEN_AI_RESPONSE_MODEL, traced_response.response_model)
+     if usage := traced_response.usage:
+         _set_span_attribute(span, GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens)
+         _set_span_attribute(span, GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens)
+         _set_span_attribute(
+             span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.total_tokens
+         )
+         if usage.input_tokens_details:
+             _set_span_attribute(
+                 span,
+                 SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS,
+                 usage.input_tokens_details.cached_tokens,
+             )
+         # TODO: add reasoning tokens in output token details
+
+     if should_send_prompts():
+         prompt_index = 0
+         if traced_response.tools:
+             for i, tool_param in enumerate(traced_response.tools):
+                 tool_dict = model_as_dict(tool_param)
+                 description = tool_dict.get("description")
+                 parameters = tool_dict.get("parameters")
+                 name = tool_dict.get("name")
+                 if parameters is None:
+                     continue
+                 _set_span_attribute(
+                     span,
+                     f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}.description",
+                     description,
+                 )
+                 _set_span_attribute(
+                     span,
+                     f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}.parameters",
+                     json.dumps(parameters),
+                 )
+                 _set_span_attribute(
+                     span,
+                     f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}.name",
+                     name,
+                 )
+         if traced_response.instructions:
+             _set_span_attribute(
+                 span,
+                 f"{GEN_AI_PROMPT}.{prompt_index}.content",
+                 traced_response.instructions,
+             )
+             _set_span_attribute(span, f"{GEN_AI_PROMPT}.{prompt_index}.role", "system")
+             prompt_index += 1
+
+         if isinstance(traced_response.input, str):
+             _set_span_attribute(
+                 span, f"{GEN_AI_PROMPT}.{prompt_index}.content", traced_response.input
+             )
+             _set_span_attribute(span, f"{GEN_AI_PROMPT}.{prompt_index}.role", "user")
+             prompt_index += 1
+         else:
+             for block in traced_response.input:
+                 block_dict = model_as_dict(block)
+                 if block_dict.get("type", "message") == "message":
+                     content = block_dict.get("content")
+                     if is_validator_iterator(content):
+                         # we're after the actual call here, so we can consume the iterator
+                         content = [process_content_block(block) for block in content]
+                     try:
+                         stringified_content = (
+                             content if isinstance(content, str) else json.dumps(content)
+                         )
+                     except Exception:
+                         stringified_content = (
+                             str(content) if content is not None else ""
+                         )
+                     _set_span_attribute(
+                         span,
+                         f"{GEN_AI_PROMPT}.{prompt_index}.content",
+                         stringified_content,
+                     )
+                     _set_span_attribute(
+                         span,
+                         f"{GEN_AI_PROMPT}.{prompt_index}.role",
+                         block_dict.get("role"),
+                     )
+                     prompt_index += 1
+                 elif block_dict.get("type") == "computer_call_output":
+                     _set_span_attribute(
+                         span, f"{GEN_AI_PROMPT}.{prompt_index}.role", "computer-call"
+                     )
+                     output_image_url = block_dict.get("output", {}).get("image_url")
+                     if output_image_url:
+                         _set_span_attribute(
+                             span,
+                             f"{GEN_AI_PROMPT}.{prompt_index}.content",
+                             json.dumps(
+                                 [
+                                     {
+                                         "type": "image_url",
+                                         "image_url": {"url": output_image_url},
+                                     }
+                                 ]
+                             ),
+                         )
+                     prompt_index += 1
+                 elif block_dict.get("type") == "computer_call":
+                     _set_span_attribute(
+                         span, f"{GEN_AI_PROMPT}.{prompt_index}.role", "assistant"
+                     )
+                     call_content = {}
+                     if block_dict.get("id"):
+                         call_content["id"] = block_dict.get("id")
+                     if block_dict.get("call_id"):
+                         call_content["call_id"] = block_dict.get("call_id")
+                     if block_dict.get("action"):
+                         call_content["action"] = block_dict.get("action")
+                     _set_span_attribute(
+                         span,
+                         f"{GEN_AI_PROMPT}.{prompt_index}.content",
+                         json.dumps(call_content),
+                     )
+                     prompt_index += 1
+                 # TODO: handle other block types
+
+         _set_span_attribute(span, f"{GEN_AI_COMPLETION}.0.role", "assistant")
+         if traced_response.output_text:
+             _set_span_attribute(
+                 span, f"{GEN_AI_COMPLETION}.0.content", traced_response.output_text
+             )
+         tool_call_index = 0
+         for block in traced_response.output_blocks.values():
+             block_dict = model_as_dict(block)
+             if block_dict.get("type") == "message":
+                 # either a refusal or handled in output_text above
+                 continue
+             if block_dict.get("type") == "function_call":
+                 _set_span_attribute(
+                     span,
+                     f"{GEN_AI_COMPLETION}.0.tool_calls.{tool_call_index}.id",
+                     block_dict.get("id"),
+                 )
+                 _set_span_attribute(
+                     span,
+                     f"{GEN_AI_COMPLETION}.0.tool_calls.{tool_call_index}.name",
+                     block_dict.get("name"),
+                 )
+                 _set_span_attribute(
+                     span,
+                     f"{GEN_AI_COMPLETION}.0.tool_calls.{tool_call_index}.arguments",
+                     block_dict.get("arguments"),
+                 )
+                 tool_call_index += 1
+             elif block_dict.get("type") == "file_search_call":
+                 _set_span_attribute(
+                     span,
+                     f"{GEN_AI_COMPLETION}.0.tool_calls.{tool_call_index}.id",
+                     block_dict.get("id"),
+                 )
+                 _set_span_attribute(
+                     span,
+                     f"{GEN_AI_COMPLETION}.0.tool_calls.{tool_call_index}.name",
+                     "file_search_call",
+                 )
+                 tool_call_index += 1
+             elif block_dict.get("type") == "web_search_call":
+                 _set_span_attribute(
+                     span,
+                     f"{GEN_AI_COMPLETION}.0.tool_calls.{tool_call_index}.id",
+                     block_dict.get("id"),
+                 )
+                 _set_span_attribute(
+                     span,
+                     f"{GEN_AI_COMPLETION}.0.tool_calls.{tool_call_index}.name",
+                     "web_search_call",
+                 )
+                 tool_call_index += 1
+             elif block_dict.get("type") == "computer_call":
+                 _set_span_attribute(
+                     span,
+                     f"{GEN_AI_COMPLETION}.0.tool_calls.{tool_call_index}.id",
+                     block_dict.get("call_id"),
+                 )
+                 _set_span_attribute(
+                     span,
+                     f"{GEN_AI_COMPLETION}.0.tool_calls.{tool_call_index}.name",
+                     "computer_call",
+                 )
+                 _set_span_attribute(
+                     span,
+                     f"{GEN_AI_COMPLETION}.0.tool_calls.{tool_call_index}.arguments",
+                     json.dumps(block_dict.get("action")),
+                 )
+                 tool_call_index += 1
+             elif block_dict.get("type") == "reasoning":
+                 _set_span_attribute(
+                     span, f"{GEN_AI_COMPLETION}.0.reasoning", block_dict.get("summary")
+                 )
+             # TODO: handle other block types, in particular other calls
+
+
+ @dont_throw
+ @_with_tracer_wrapper
+ def responses_get_or_create_wrapper(tracer: Tracer, wrapped, instance, args, kwargs):
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
+         return wrapped(*args, **kwargs)
+     start_time = time.time_ns()
+
+     try:
+         response = wrapped(*args, **kwargs)
+         if isinstance(response, Stream):
+             return response
+     except Exception as e:
+         response_id = kwargs.get("response_id")
+         existing_data = {}
+         if response_id and response_id in responses:
+             existing_data = responses[response_id].model_dump()
+         try:
+             traced_data = TracedData(
+                 start_time=existing_data.get("start_time", start_time),
+                 response_id=response_id or "",
+                 input=process_input(
+                     kwargs.get("input", existing_data.get("input", []))
+                 ),
+                 instructions=kwargs.get(
+                     "instructions", existing_data.get("instructions")
+                 ),
+                 tools=get_tools_from_kwargs(kwargs) or existing_data.get("tools", []),
+                 output_blocks=existing_data.get("output_blocks", {}),
+                 usage=existing_data.get("usage"),
+                 output_text=kwargs.get(
+                     "output_text", existing_data.get("output_text", "")
+                 ),
+                 request_model=kwargs.get(
+                     "model", existing_data.get("request_model", "")
+                 ),
+                 response_model=existing_data.get("response_model", ""),
+             )
+         except Exception:
+             traced_data = None
+
+         span = tracer.start_span(
+             SPAN_NAME,
+             kind=SpanKind.CLIENT,
+             start_time=(
+                 start_time if traced_data is None else int(traced_data.start_time)
+             ),
+         )
+         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+         span.record_exception(e)
+         span.set_status(StatusCode.ERROR, str(e))
+         if traced_data:
+             set_data_attributes(traced_data, span)
+         span.end()
+         raise
+     parsed_response = parse_response(response)
+
+     existing_data = responses.get(parsed_response.id)
+     if existing_data is None:
+         existing_data = {}
+     else:
+         existing_data = existing_data.model_dump()
+
+     request_tools = get_tools_from_kwargs(kwargs)
+
+     merged_tools = existing_data.get("tools", []) + request_tools
+
+     try:
+         traced_data = TracedData(
+             start_time=existing_data.get("start_time", start_time),
+             response_id=parsed_response.id,
+             input=process_input(existing_data.get("input", kwargs.get("input"))),
+             instructions=existing_data.get("instructions", kwargs.get("instructions")),
+             tools=merged_tools if merged_tools else None,
+             output_blocks={block.id: block for block in parsed_response.output}
+             | existing_data.get("output_blocks", {}),
+             usage=existing_data.get("usage", parsed_response.usage),
+             output_text=existing_data.get("output_text", parsed_response.output_text),
+             request_model=existing_data.get("request_model", kwargs.get("model")),
+             response_model=existing_data.get("response_model", parsed_response.model),
+         )
+         responses[parsed_response.id] = traced_data
+     except Exception:
+         return response
+
+     if parsed_response.status == "completed":
+         span = tracer.start_span(
+             SPAN_NAME,
+             kind=SpanKind.CLIENT,
+             start_time=int(traced_data.start_time),
+         )
+         set_data_attributes(traced_data, span)
+         span.end()
+
+     return response
+
+
+ @dont_throw
+ @_with_tracer_wrapper
+ async def async_responses_get_or_create_wrapper(
+     tracer: Tracer, wrapped, instance, args, kwargs
+ ):
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
+         return await wrapped(*args, **kwargs)
+     start_time = time.time_ns()
+
+     try:
+         response = await wrapped(*args, **kwargs)
+         if isinstance(response, (Stream, AsyncStream)):
+             return response
+     except Exception as e:
+         response_id = kwargs.get("response_id")
+         existing_data = {}
+         if response_id and response_id in responses:
+             existing_data = responses[response_id].model_dump()
+         try:
+             traced_data = TracedData(
+                 start_time=existing_data.get("start_time", start_time),
+                 response_id=response_id or "",
+                 input=process_input(
+                     kwargs.get("input", existing_data.get("input", []))
+                 ),
+                 instructions=kwargs.get(
+                     "instructions", existing_data.get("instructions", "")
+                 ),
+                 tools=get_tools_from_kwargs(kwargs) or existing_data.get("tools", []),
+                 output_blocks=existing_data.get("output_blocks", {}),
+                 usage=existing_data.get("usage"),
+                 output_text=kwargs.get("output_text", existing_data.get("output_text")),
+                 request_model=kwargs.get("model", existing_data.get("request_model")),
+                 response_model=existing_data.get("response_model"),
+             )
+         except Exception:
+             traced_data = None
+
+         span = tracer.start_span(
+             SPAN_NAME,
+             kind=SpanKind.CLIENT,
+             start_time=(
+                 start_time if traced_data is None else int(traced_data.start_time)
+             ),
+         )
+         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+         span.record_exception(e)
+         span.set_status(StatusCode.ERROR, str(e))
+         if traced_data:
+             set_data_attributes(traced_data, span)
+         span.end()
+         raise
+     parsed_response = parse_response(response)
+
+     existing_data = responses.get(parsed_response.id)
+     if existing_data is None:
+         existing_data = {}
+     else:
+         existing_data = existing_data.model_dump()
+
+     request_tools = get_tools_from_kwargs(kwargs)
+
+     merged_tools = existing_data.get("tools", []) + request_tools
+
+     try:
+         traced_data = TracedData(
+             start_time=existing_data.get("start_time", start_time),
+             response_id=parsed_response.id,
+             input=process_input(existing_data.get("input", kwargs.get("input"))),
+             instructions=existing_data.get("instructions", kwargs.get("instructions")),
+             tools=merged_tools if merged_tools else None,
+             output_blocks={block.id: block for block in parsed_response.output}
+             | existing_data.get("output_blocks", {}),
+             usage=existing_data.get("usage", parsed_response.usage),
+             output_text=existing_data.get("output_text", parsed_response.output_text),
+             request_model=existing_data.get("request_model", kwargs.get("model")),
+             response_model=existing_data.get("response_model", parsed_response.model),
+         )
+         responses[parsed_response.id] = traced_data
+     except Exception:
+         return response
+
+     if parsed_response.status == "completed":
+         span = tracer.start_span(
+             SPAN_NAME,
+             kind=SpanKind.CLIENT,
+             start_time=int(traced_data.start_time),
+         )
+         set_data_attributes(traced_data, span)
+         span.end()
+
+     return response
+
+
+ @dont_throw
+ @_with_tracer_wrapper
+ def responses_cancel_wrapper(tracer: Tracer, wrapped, instance, args, kwargs):
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
+         return wrapped(*args, **kwargs)
+
+     response = wrapped(*args, **kwargs)
+     if isinstance(response, Stream):
+         return response
+     parsed_response = parse_response(response)
+     existing_data = responses.pop(parsed_response.id, None)
+     if existing_data is not None:
+         span = tracer.start_span(
+             SPAN_NAME,
+             kind=SpanKind.CLIENT,
+             start_time=existing_data.start_time,
+             record_exception=True,
+         )
+         span.record_exception(Exception("Response cancelled"))
+         set_data_attributes(existing_data, span)
+         span.end()
+     return response
+
+
+ @dont_throw
+ @_with_tracer_wrapper
+ async def async_responses_cancel_wrapper(
+     tracer: Tracer, wrapped, instance, args, kwargs
+ ):
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
+         return await wrapped(*args, **kwargs)
+
+     response = await wrapped(*args, **kwargs)
+     if isinstance(response, (Stream, AsyncStream)):
+         return response
+     parsed_response = parse_response(response)
+     existing_data = responses.pop(parsed_response.id, None)
+     if existing_data is not None:
+         span = tracer.start_span(
+             SPAN_NAME,
+             kind=SpanKind.CLIENT,
+             start_time=existing_data.start_time,
+             record_exception=True,
+         )
+         span.record_exception(Exception("Response cancelled"))
+         set_data_attributes(existing_data, span)
+         span.end()
+     return response
+
+
+ # TODO: build streaming responses
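These wrappers follow the usual OpenTelemetry wrapt convention: `_with_tracer_wrapper` curries the tracer, and the actual registration lives in `openai/v1/__init__.py` (file 14 above, not rendered in this diff). A sketch of how such a wrapper would be attached, where the `openai.resources.responses` module path and the bare `tracer` variable are assumptions, not taken from this hunk:

    from wrapt import wrap_function_wrapper

    # Hypothetical registration; the real call sites are in v1/__init__.py.
    wrap_function_wrapper(
        "openai.resources.responses",
        "Responses.create",
        responses_get_or_create_wrapper(tracer),  # tracer bound by _with_tracer_wrapper
    )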
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py
@@ -0,0 +1 @@
+ __version__ = "0.40.14"
lmnr/opentelemetry_lib/tracing/_instrument_initializers.py
@@ -261,10 +261,8 @@ class OpenAIInstrumentorInitializer(InstrumentorInitializer):
      def init_instrumentor(self, *args, **kwargs) -> BaseInstrumentor | None:
          if not is_package_installed("openai"):
              return None
-         if not is_package_installed("opentelemetry-instrumentation-openai"):
-             return None

-         from opentelemetry.instrumentation.openai import OpenAIInstrumentor
+         from ..opentelemetry.instrumentation.openai import OpenAIInstrumentor

          return OpenAIInstrumentor(
              # Default in the package provided is an empty function, which
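Since `_instrument_initializers.py` sits in `lmnr/opentelemetry_lib/tracing/`, the new relative import resolves to the vendored package; the equivalent absolute import would be:

    # No external opentelemetry-instrumentation-openai distribution needed:
    from lmnr.opentelemetry_lib.opentelemetry.instrumentation.openai import (
        OpenAIInstrumentor,
    )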
lmnr/sdk/browser/browser_use_otel.py
@@ -19,7 +19,7 @@ except ImportError as e:
          "to install Browser Use or remove this import."
      ) from e

- _instruments = ("browser-use >= 0.1.0",)
+ _instruments = ("browser-use < 0.5.0",)

  WRAPPED_METHODS = [
      {
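`_instruments` is the dependency constraint that OpenTelemetry's `BaseInstrumentor` checks before patching, so this change stops the instrumentation from attaching to browser-use 0.5.0 and newer. A sketch of how the tuple is consumed, assuming the standard opentelemetry-instrumentation API (the class name and stub bodies are illustrative):

    from opentelemetry.instrumentation.instrumentor import BaseInstrumentor

    _instruments = ("browser-use < 0.5.0",)  # as pinned in this release

    class BrowserUseInstrumentor(BaseInstrumentor):  # illustrative name
        def instrumentation_dependencies(self) -> tuple[str, ...]:
            # BaseInstrumentor.instrument() checks these constraints and
            # skips patching when the installed browser-use is >= 0.5.0
            return _instruments

        def _instrument(self, **kwargs):
            ...  # wrapt patching would go here

        def _uninstrument(self, **kwargs):
            ...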
lmnr/sdk/browser/patchright_otel.py
@@ -5,8 +5,6 @@ from lmnr.sdk.browser.playwright_otel import (
      _wrap_new_browser_async,
      _wrap_new_context_sync,
      _wrap_new_context_async,
-     _wrap_close_browser_sync,
-     _wrap_close_browser_async,
  )
  from lmnr.sdk.client.synchronous.sync_client import LaminarClient
  from lmnr.sdk.client.asynchronous.async_client import AsyncLaminarClient
@@ -50,12 +48,6 @@ WRAPPED_METHODS = [
          "method": "connect_over_cdp",
          "wrapper": _wrap_new_browser_sync,
      },
-     {
-         "package": "patchright.sync_api",
-         "object": "Browser",
-         "method": "close",
-         "wrapper": _wrap_close_browser_sync,
-     },
      {
          "package": "patchright.sync_api",
          "object": "Browser",
@@ -101,12 +93,6 @@ WRAPPED_METHODS_ASYNC = [
          "method": "connect_over_cdp",
          "wrapper": _wrap_new_browser_async,
      },
-     {
-         "package": "patchright.async_api",
-         "object": "Browser",
-         "method": "close",
-         "wrapper": _wrap_close_browser_async,
-     },
      {
          "package": "patchright.async_api",
          "object": "Browser",