lmnr 0.6.19__py3-none-any.whl → 0.6.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. lmnr/opentelemetry_lib/decorators/__init__.py +188 -138
  2. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +674 -0
  3. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
  4. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
  5. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
  6. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +256 -0
  7. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +295 -0
  8. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +179 -0
  9. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
  10. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +485 -0
  11. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
  12. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
  13. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
  14. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
  15. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
  16. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
  17. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +3 -3
  18. lmnr/opentelemetry_lib/tracing/__init__.py +1 -1
  19. lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +12 -7
  20. lmnr/opentelemetry_lib/tracing/processor.py +1 -1
  21. lmnr/opentelemetry_lib/utils/package_check.py +9 -0
  22. lmnr/sdk/browser/browser_use_otel.py +4 -2
  23. lmnr/sdk/browser/patchright_otel.py +0 -26
  24. lmnr/sdk/browser/playwright_otel.py +51 -78
  25. lmnr/sdk/browser/pw_utils.py +359 -114
  26. lmnr/sdk/client/asynchronous/async_client.py +13 -0
  27. lmnr/sdk/client/asynchronous/resources/__init__.py +2 -0
  28. lmnr/sdk/client/asynchronous/resources/evaluators.py +85 -0
  29. lmnr/sdk/client/asynchronous/resources/tags.py +4 -10
  30. lmnr/sdk/client/synchronous/resources/__init__.py +2 -1
  31. lmnr/sdk/client/synchronous/resources/evaluators.py +85 -0
  32. lmnr/sdk/client/synchronous/resources/tags.py +4 -10
  33. lmnr/sdk/client/synchronous/sync_client.py +14 -0
  34. lmnr/sdk/decorators.py +39 -4
  35. lmnr/sdk/evaluations.py +23 -9
  36. lmnr/sdk/laminar.py +75 -48
  37. lmnr/sdk/utils.py +23 -0
  38. lmnr/version.py +1 -1
  39. {lmnr-0.6.19.dist-info → lmnr-0.6.21.dist-info}/METADATA +8 -7
  40. {lmnr-0.6.19.dist-info → lmnr-0.6.21.dist-info}/RECORD +42 -25
  41. {lmnr-0.6.19.dist-info → lmnr-0.6.21.dist-info}/WHEEL +1 -1
  42. {lmnr-0.6.19.dist-info → lmnr-0.6.21.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,674 @@
1
+ """OpenTelemetry Anthropic instrumentation"""
2
+
3
+ import logging
4
+ import time
5
+ from typing import Callable, Collection, Optional
6
+
7
+ from opentelemetry import context as context_api
8
+ from opentelemetry._events import EventLogger, get_event_logger
9
+ from .config import Config
10
+ from .event_emitter import (
11
+ emit_input_events,
12
+ emit_response_events,
13
+ )
14
+ from .span_utils import (
15
+ aset_input_attributes,
16
+ set_response_attributes,
17
+ )
18
+ from .streaming import (
19
+ abuild_from_streaming_response,
20
+ build_from_streaming_response,
21
+ )
22
+ from .utils import (
23
+ acount_prompt_tokens_from_request,
24
+ count_prompt_tokens_from_request,
25
+ dont_throw,
26
+ error_metrics_attributes,
27
+ run_async,
28
+ set_span_attribute,
29
+ shared_metrics_attributes,
30
+ should_emit_events,
31
+ )
32
+ from .version import __version__
33
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
34
+ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap
35
+ from opentelemetry.metrics import Counter, Histogram, Meter, get_meter
36
+ from opentelemetry.semconv_ai import (
37
+ SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
38
+ LLMRequestTypeValues,
39
+ Meters,
40
+ SpanAttributes,
41
+ )
42
+ from opentelemetry.trace import Span, SpanKind, Tracer, get_tracer
43
+ from opentelemetry.trace.status import Status, StatusCode
44
+ from typing_extensions import Coroutine
45
+ from wrapt import wrap_function_wrapper
46
+
47
+ from anthropic._streaming import AsyncStream, Stream
48
+
49
+ logger = logging.getLogger(__name__)
50
+
51
+ _instruments = ("anthropic >= 0.3.11",)
52
+
53
+ WRAPPED_METHODS = [
54
+ {
55
+ "package": "anthropic.resources.completions",
56
+ "object": "Completions",
57
+ "method": "create",
58
+ "span_name": "anthropic.completion",
59
+ },
60
+ {
61
+ "package": "anthropic.resources.messages",
62
+ "object": "Messages",
63
+ "method": "create",
64
+ "span_name": "anthropic.chat",
65
+ },
66
+ {
67
+ "package": "anthropic.resources.messages",
68
+ "object": "Messages",
69
+ "method": "stream",
70
+ "span_name": "anthropic.chat",
71
+ },
72
+ ]
73
+
74
+ WRAPPED_AMETHODS = [
75
+ {
76
+ "package": "anthropic.resources.completions",
77
+ "object": "AsyncCompletions",
78
+ "method": "create",
79
+ "span_name": "anthropic.completion",
80
+ },
81
+ {
82
+ "package": "anthropic.resources.messages",
83
+ "object": "AsyncMessages",
84
+ "method": "create",
85
+ "span_name": "anthropic.chat",
86
+ },
87
+ {
88
+ "package": "anthropic.resources.messages",
89
+ "object": "AsyncMessages",
90
+ "method": "stream",
91
+ "span_name": "anthropic.chat",
92
+ },
93
+ ]
94
+
95
+
96
def is_streaming_response(response):
    """Return True when *response* is an Anthropic streaming object.

    Covers both the synchronous ``Stream`` and asynchronous ``AsyncStream``
    wrappers returned by the SDK. Uses a single ``isinstance`` call with a
    tuple instead of two chained ``isinstance`` checks.
    """
    return isinstance(response, (Stream, AsyncStream))
98
+
99
+
100
@dont_throw
async def _aset_token_usage(
    span,
    anthropic,
    request,
    response,
    metric_attributes: Optional[dict] = None,
    token_histogram: Optional[Histogram] = None,
    choice_counter: Optional[Counter] = None,
):
    """Record token-usage span attributes and metrics for an async response.

    Fixes: the original used a mutable default argument
    (``metric_attributes: dict = {}``) — replaced with ``None`` plus an
    in-body default — and annotated ``None``-defaulted parameters as
    non-Optional. Also reads ``response["usage"]`` once instead of twice.

    Args:
        span: The span to receive ``LLM_USAGE_*`` attributes.
        anthropic: The Anthropic client (used for token counting fallback).
        request: The original request kwargs.
        response: The SDK response (object or dict).
        metric_attributes: Extra attributes attached to recorded metrics.
        token_histogram: Optional histogram for input/output token counts.
        choice_counter: Optional counter for number of returned choices.
    """
    if metric_attributes is None:
        metric_attributes = {}

    # Normalize the response to a plain dict so .get() works uniformly.
    if not isinstance(response, dict):
        response = response.__dict__

    usage = response.get("usage")
    if usage:
        prompt_tokens = usage.input_tokens
        # Cache token fields may be absent or None; coerce to 0.
        cache_read_tokens = dict(usage).get("cache_read_input_tokens", 0) or 0
        cache_creation_tokens = dict(usage).get("cache_creation_input_tokens", 0) or 0
    else:
        # No usage block in the response — estimate from the request instead.
        prompt_tokens = await acount_prompt_tokens_from_request(anthropic, request)
        cache_read_tokens = 0
        cache_creation_tokens = 0

    input_tokens = prompt_tokens + cache_read_tokens + cache_creation_tokens

    if token_histogram and isinstance(input_tokens, int) and input_tokens >= 0:
        token_histogram.record(
            input_tokens,
            attributes={
                **metric_attributes,
                SpanAttributes.LLM_TOKEN_TYPE: "input",
            },
        )

    if usage:
        completion_tokens = usage.output_tokens
    else:
        completion_tokens = 0
        # Fall back to client-side counting when the SDK supports it.
        if hasattr(anthropic, "count_tokens"):
            if response.get("completion"):
                completion_tokens = await anthropic.count_tokens(
                    response.get("completion")
                )
            elif response.get("content"):
                completion_tokens = await anthropic.count_tokens(
                    response.get("content")[0].text
                )

    if (
        token_histogram
        and isinstance(completion_tokens, int)
        and completion_tokens >= 0
    ):
        token_histogram.record(
            completion_tokens,
            attributes={
                **metric_attributes,
                SpanAttributes.LLM_TOKEN_TYPE: "output",
            },
        )

    total_tokens = input_tokens + completion_tokens

    # Messages responses carry a list of content blocks; legacy completions
    # carry a single "completion" string.
    choices = 0
    if isinstance(response.get("content"), list):
        choices = len(response.get("content"))
    elif response.get("completion"):
        choices = 1

    if choices > 0 and choice_counter:
        choice_counter.add(
            choices,
            attributes={
                **metric_attributes,
                SpanAttributes.LLM_RESPONSE_STOP_REASON: response.get("stop_reason"),
            },
        )

    set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, input_tokens)
    set_span_attribute(
        span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens
    )
    set_span_attribute(span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens)

    set_span_attribute(
        span, SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens
    )
    set_span_attribute(
        span,
        SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS,
        cache_creation_tokens,
    )
191
+
192
+
193
@dont_throw
def _set_token_usage(
    span,
    anthropic,
    request,
    response,
    metric_attributes: Optional[dict] = None,
    token_histogram: Optional[Histogram] = None,
    choice_counter: Optional[Counter] = None,
):
    """Record token-usage span attributes and metrics for a sync response.

    Synchronous twin of ``_aset_token_usage``. Fixes: mutable default
    argument (``metric_attributes: dict = {}``) replaced with ``None`` plus
    an in-body default; ``None``-defaulted parameters annotated Optional;
    ``response["usage"]`` read once instead of twice.

    Args:
        span: The span to receive ``LLM_USAGE_*`` attributes.
        anthropic: The Anthropic client (used for token counting fallback).
        request: The original request kwargs.
        response: The SDK response (object or dict).
        metric_attributes: Extra attributes attached to recorded metrics.
        token_histogram: Optional histogram for input/output token counts.
        choice_counter: Optional counter for number of returned choices.
    """
    if metric_attributes is None:
        metric_attributes = {}

    # Normalize the response to a plain dict so .get() works uniformly.
    if not isinstance(response, dict):
        response = response.__dict__

    usage = response.get("usage")
    if usage:
        prompt_tokens = usage.input_tokens
        # Cache token fields may be absent or None; coerce to 0.
        cache_read_tokens = dict(usage).get("cache_read_input_tokens", 0) or 0
        cache_creation_tokens = dict(usage).get("cache_creation_input_tokens", 0) or 0
    else:
        # No usage block in the response — estimate from the request instead.
        prompt_tokens = count_prompt_tokens_from_request(anthropic, request)
        cache_read_tokens = 0
        cache_creation_tokens = 0

    input_tokens = prompt_tokens + cache_read_tokens + cache_creation_tokens

    if token_histogram and isinstance(input_tokens, int) and input_tokens >= 0:
        token_histogram.record(
            input_tokens,
            attributes={
                **metric_attributes,
                SpanAttributes.LLM_TOKEN_TYPE: "input",
            },
        )

    if usage:
        completion_tokens = usage.output_tokens
    else:
        completion_tokens = 0
        # Fall back to client-side counting when the SDK supports it.
        if hasattr(anthropic, "count_tokens"):
            if response.get("completion"):
                completion_tokens = anthropic.count_tokens(response.get("completion"))
            elif response.get("content"):
                completion_tokens = anthropic.count_tokens(
                    response.get("content")[0].text
                )

    if (
        token_histogram
        and isinstance(completion_tokens, int)
        and completion_tokens >= 0
    ):
        token_histogram.record(
            completion_tokens,
            attributes={
                **metric_attributes,
                SpanAttributes.LLM_TOKEN_TYPE: "output",
            },
        )

    total_tokens = input_tokens + completion_tokens

    # Messages responses carry a list of content blocks; legacy completions
    # carry a single "completion" string.
    choices = 0
    if isinstance(response.get("content"), list):
        choices = len(response.get("content"))
    elif response.get("completion"):
        choices = 1

    if choices > 0 and choice_counter:
        choice_counter.add(
            choices,
            attributes={
                **metric_attributes,
                SpanAttributes.LLM_RESPONSE_STOP_REASON: response.get("stop_reason"),
            },
        )

    set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, input_tokens)
    set_span_attribute(
        span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens
    )
    set_span_attribute(span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens)

    set_span_attribute(
        span, SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens
    )
    set_span_attribute(
        span,
        SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS,
        cache_creation_tokens,
    )
282
+
283
+
284
+ def _with_chat_telemetry_wrapper(func):
285
+ """Helper for providing tracer for wrapper functions. Includes metric collectors."""
286
+
287
+ def _with_chat_telemetry(
288
+ tracer,
289
+ token_histogram,
290
+ choice_counter,
291
+ duration_histogram,
292
+ exception_counter,
293
+ event_logger,
294
+ to_wrap,
295
+ ):
296
+ def wrapper(wrapped, instance, args, kwargs):
297
+ return func(
298
+ tracer,
299
+ token_histogram,
300
+ choice_counter,
301
+ duration_histogram,
302
+ exception_counter,
303
+ event_logger,
304
+ to_wrap,
305
+ wrapped,
306
+ instance,
307
+ args,
308
+ kwargs,
309
+ )
310
+
311
+ return wrapper
312
+
313
+ return _with_chat_telemetry
314
+
315
+
316
def _create_metrics(meter: Meter):
    """Create the four metric instruments used by the chat wrappers.

    Returns:
        A tuple ``(token_histogram, choice_counter, duration_histogram,
        exception_counter)`` created on *meter*.
    """
    # Instruments are created in the same order the callers unpack them.
    return (
        meter.create_histogram(
            name=Meters.LLM_TOKEN_USAGE,
            unit="token",
            description="Measures number of input and output tokens used",
        ),
        meter.create_counter(
            name=Meters.LLM_GENERATION_CHOICES,
            unit="choice",
            description="Number of choices returned by chat completions call",
        ),
        meter.create_histogram(
            name=Meters.LLM_OPERATION_DURATION,
            unit="s",
            description="GenAI operation duration",
        ),
        meter.create_counter(
            name=Meters.LLM_ANTHROPIC_COMPLETION_EXCEPTIONS,
            unit="time",
            description="Number of exceptions occurred during chat completions",
        ),
    )
342
+
343
+
344
@dont_throw
def _handle_input(span: Span, event_logger: Optional[EventLogger], kwargs):
    """Record the request payload as events or, legacy-style, as span attributes."""
    if should_emit_events() and event_logger:
        emit_input_events(event_logger, kwargs)
        return
    # Legacy attribute path: only worth the work if the span is sampled.
    if span.is_recording():
        run_async(aset_input_attributes(span, kwargs))
352
+
353
+
354
@dont_throw
async def _ahandle_input(span: Span, event_logger: Optional[EventLogger], kwargs):
    """Async variant of ``_handle_input``: events or legacy span attributes."""
    if should_emit_events() and event_logger:
        emit_input_events(event_logger, kwargs)
        return
    # Legacy attribute path: only worth the work if the span is sampled.
    if span.is_recording():
        await aset_input_attributes(span, kwargs)
362
+
363
+
364
@dont_throw
def _handle_response(span: Span, event_logger: Optional[EventLogger], response):
    """Record the response payload as events or, legacy-style, as span attributes.

    Fix: also require a non-None ``event_logger`` before emitting events,
    matching ``_handle_input``. Previously ``emit_response_events`` could be
    invoked with ``event_logger=None`` when event emission was enabled but no
    event logger had been configured.
    """
    if should_emit_events() and event_logger:
        emit_response_events(event_logger, response)
    else:
        if not span.is_recording():
            return
        set_response_attributes(span, response)
372
+
373
+
374
@_with_chat_telemetry_wrapper
def _wrap(
    tracer: Tracer,
    token_histogram: Histogram,
    choice_counter: Counter,
    duration_histogram: Histogram,
    exception_counter: Counter,
    event_logger: Optional[EventLogger],
    to_wrap,
    wrapped,
    instance,
    args,
    kwargs,
):
    """Instruments and calls every function defined in TO_WRAP.

    Synchronous wrapt wrapper for Anthropic client methods: starts a CLIENT
    span, records input, calls the wrapped method, then records duration
    metrics, response attributes/events and token usage before ending the
    span. Streaming responses are handed off to
    ``build_from_streaming_response``, which takes over the span lifecycle.
    """
    # Honor both the generic and the LLM-specific suppression context keys.
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
    ):
        return wrapped(*args, **kwargs)

    name = to_wrap.get("span_name")
    span = tracer.start_span(
        name,
        kind=SpanKind.CLIENT,
        attributes={
            SpanAttributes.LLM_SYSTEM: "Anthropic",
            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
        },
    )

    _handle_input(span, event_logger, kwargs)

    start_time = time.time()
    try:
        response = wrapped(*args, **kwargs)
    except Exception as e:  # pylint: disable=broad-except
        end_time = time.time()
        attributes = error_metrics_attributes(e)

        if duration_histogram:
            duration = end_time - start_time
            duration_histogram.record(duration, attributes=attributes)

        if exception_counter:
            exception_counter.add(1, attributes=attributes)

        # NOTE(review): the span is neither ended nor marked as errored on
        # this path, so a failing request leaks an unfinished span — confirm
        # whether this is intentional.
        raise e

    end_time = time.time()

    if is_streaming_response(response):
        # The streaming accumulator owns the span from here on and ends it
        # when the stream is consumed.
        return build_from_streaming_response(
            span,
            response,
            instance._client,
            start_time,
            token_histogram,
            choice_counter,
            duration_histogram,
            exception_counter,
            event_logger,
            kwargs,
        )
    elif response:
        try:
            metric_attributes = shared_metrics_attributes(response)

            if duration_histogram:
                duration = time.time() - start_time
                duration_histogram.record(
                    duration,
                    attributes=metric_attributes,
                )

            _handle_response(span, event_logger, response)
            if span.is_recording():
                _set_token_usage(
                    span,
                    instance._client,
                    kwargs,
                    response,
                    metric_attributes,
                    token_histogram,
                    choice_counter,
                )
        except Exception as ex:  # pylint: disable=broad-except
            # Telemetry failures must never break the user's API call.
            logger.warning(
                "Failed to set response attributes for anthropic span, error: %s",
                str(ex),
            )

    if span.is_recording():
        span.set_status(Status(StatusCode.OK))
    span.end()
    return response
469
+
470
+
471
@_with_chat_telemetry_wrapper
async def _awrap(
    tracer,
    token_histogram: Histogram,
    choice_counter: Counter,
    duration_histogram: Histogram,
    exception_counter: Counter,
    event_logger: Optional[EventLogger],
    to_wrap,
    wrapped,
    instance,
    args,
    kwargs,
):
    """Instruments and calls every function defined in TO_WRAP.

    Async twin of ``_wrap``: starts a CLIENT span, records input, awaits the
    wrapped coroutine, then records metrics, response data and token usage
    before ending the span. Streaming responses are handed off to
    ``abuild_from_streaming_response``, which takes over the span lifecycle.
    """
    # Honor both the generic and the LLM-specific suppression context keys.
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
    ):
        return await wrapped(*args, **kwargs)

    name = to_wrap.get("span_name")
    span = tracer.start_span(
        name,
        kind=SpanKind.CLIENT,
        attributes={
            SpanAttributes.LLM_SYSTEM: "Anthropic",
            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
        },
    )
    await _ahandle_input(span, event_logger, kwargs)

    start_time = time.time()
    try:
        response = await wrapped(*args, **kwargs)
    except Exception as e:  # pylint: disable=broad-except
        end_time = time.time()
        attributes = error_metrics_attributes(e)

        if duration_histogram:
            duration = end_time - start_time
            duration_histogram.record(duration, attributes=attributes)

        if exception_counter:
            exception_counter.add(1, attributes=attributes)

        # NOTE(review): as in ``_wrap``, the span is not ended on this path.
        raise e

    if is_streaming_response(response):
        # The streaming accumulator owns the span from here on.
        return abuild_from_streaming_response(
            span,
            response,
            instance._client,
            start_time,
            token_histogram,
            choice_counter,
            duration_histogram,
            exception_counter,
            event_logger,
            kwargs,
        )
    elif response:
        # NOTE(review): unlike ``_wrap``, this block is not wrapped in
        # try/except, so an exception while building telemetry would
        # propagate to the caller — confirm whether that is intended.
        metric_attributes = shared_metrics_attributes(response)

        if duration_histogram:
            duration = time.time() - start_time
            duration_histogram.record(
                duration,
                attributes=metric_attributes,
            )

        _handle_response(span, event_logger, response)

        if span.is_recording():
            await _aset_token_usage(
                span,
                instance._client,
                kwargs,
                response,
                metric_attributes,
                token_histogram,
                choice_counter,
            )
    span.set_status(Status(StatusCode.OK))
    span.end()
    return response
556
+
557
+
558
def is_metrics_enabled() -> bool:
    """Feature switch for metric collection.

    Metric instruments are deliberately disabled in this vendored copy of
    the instrumentation, so this always reports ``False``.
    """
    return False
560
+
561
+
562
class AnthropicInstrumentor(BaseInstrumentor):
    """An instrumentor for Anthropic's client library.

    Patches the sync/async ``create``/``stream`` methods listed in
    ``WRAPPED_METHODS`` and ``WRAPPED_AMETHODS`` with telemetry wrappers,
    and stores caller-supplied options on the module-level ``Config``.
    """

    def __init__(
        self,
        enrich_token_usage: bool = False,
        exception_logger=None,
        use_legacy_attributes: bool = True,
        get_common_metrics_attributes: Callable[[], dict] = lambda: {},
        upload_base64_image: Optional[
            Callable[[str, str, str, str], Coroutine[None, None, str]]
        ] = None,
    ):
        """Store instrumentation options on the shared ``Config`` class.

        Note that ``Config`` holds class-level state, so options from the
        most recently constructed instrumentor apply process-wide.
        """
        super().__init__()
        Config.exception_logger = exception_logger
        Config.enrich_token_usage = enrich_token_usage
        Config.get_common_metrics_attributes = get_common_metrics_attributes
        Config.upload_base64_image = upload_base64_image
        Config.use_legacy_attributes = use_legacy_attributes

    def instrumentation_dependencies(self) -> Collection[str]:
        """Return the package requirement(s) this instrumentor supports."""
        return _instruments

    def _instrument(self, **kwargs):
        """Wrap the Anthropic client methods with telemetry wrappers."""
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, __version__, tracer_provider)

        # meter and counters are inited here
        meter_provider = kwargs.get("meter_provider")
        meter = get_meter(__name__, __version__, meter_provider)

        if is_metrics_enabled():
            (
                token_histogram,
                choice_counter,
                duration_histogram,
                exception_counter,
            ) = _create_metrics(meter)
        else:
            # Metrics disabled: wrappers treat None instruments as no-ops.
            (
                token_histogram,
                choice_counter,
                duration_histogram,
                exception_counter,
            ) = (None, None, None, None)

        # event_logger is inited here; only used when the new event-based
        # semantic conventions are requested instead of legacy attributes.
        event_logger = None

        if not Config.use_legacy_attributes:
            event_logger_provider = kwargs.get("event_logger_provider")
            event_logger = get_event_logger(
                __name__, __version__, event_logger_provider=event_logger_provider
            )

        # Synchronous methods get the sync wrapper...
        for wrapped_method in WRAPPED_METHODS:
            wrap_package = wrapped_method.get("package")
            wrap_object = wrapped_method.get("object")
            wrap_method = wrapped_method.get("method")

            try:
                wrap_function_wrapper(
                    wrap_package,
                    f"{wrap_object}.{wrap_method}",
                    _wrap(
                        tracer,
                        token_histogram,
                        choice_counter,
                        duration_histogram,
                        exception_counter,
                        event_logger,
                        wrapped_method,
                    ),
                )
            except ModuleNotFoundError:
                pass  # that's ok, we don't want to fail if some methods do not exist

        # ...and async methods get the async wrapper.
        for wrapped_method in WRAPPED_AMETHODS:
            wrap_package = wrapped_method.get("package")
            wrap_object = wrapped_method.get("object")
            wrap_method = wrapped_method.get("method")
            try:
                wrap_function_wrapper(
                    wrap_package,
                    f"{wrap_object}.{wrap_method}",
                    _awrap(
                        tracer,
                        token_histogram,
                        choice_counter,
                        duration_histogram,
                        exception_counter,
                        event_logger,
                        wrapped_method,
                    ),
                )
            except ModuleNotFoundError:
                pass  # that's ok, we don't want to fail if some methods do not exist

    def _uninstrument(self, **kwargs):
        """Remove the wrappers installed by ``_instrument``."""
        for wrapped_method in WRAPPED_METHODS:
            wrap_package = wrapped_method.get("package")
            wrap_object = wrapped_method.get("object")
            unwrap(
                f"{wrap_package}.{wrap_object}",
                wrapped_method.get("method"),
            )
        for wrapped_method in WRAPPED_AMETHODS:
            wrap_package = wrapped_method.get("package")
            wrap_object = wrapped_method.get("object")
            unwrap(
                f"{wrap_package}.{wrap_object}",
                wrapped_method.get("method"),
            )
@@ -0,0 +1,13 @@
1
+ from typing import Callable, Optional
2
+
3
+ from typing_extensions import Coroutine
4
+
5
+
6
class Config:
    """Process-wide configuration for the Anthropic instrumentation.

    Attributes are class-level, so values set by one instrumentor instance
    are visible to all wrappers in the process.
    """

    # When True, compute token counts client-side if absent from responses.
    enrich_token_usage = False
    # Optional callable invoked with exceptions swallowed by the wrappers.
    exception_logger = None
    # Returns extra attributes to attach to every recorded metric.
    get_common_metrics_attributes: Callable[[], dict] = lambda: {}
    # Optional async hook to upload base64 images and return a URL.
    upload_base64_image: Optional[
        Callable[[str, str, str, str], Coroutine[None, None, str]]
    ] = None
    # True = legacy span attributes; False = event-based conventions.
    use_legacy_attributes = True