lmnr 0.6.20__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
Files changed (46)
  1. lmnr/__init__.py +0 -4
  2. lmnr/opentelemetry_lib/decorators/__init__.py +211 -151
  3. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +678 -0
  4. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
  5. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
  6. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
  7. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +256 -0
  8. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +295 -0
  9. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +179 -0
  10. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
  11. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +4 -0
  12. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
  13. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
  14. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
  15. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
  16. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
  17. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
  18. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
  19. lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +16 -16
  20. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +3 -0
  21. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +3 -0
  22. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +3 -3
  23. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +3 -0
  24. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +7 -0
  25. lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +190 -0
  26. lmnr/opentelemetry_lib/tracing/__init__.py +90 -2
  27. lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +12 -7
  28. lmnr/opentelemetry_lib/tracing/context.py +109 -0
  29. lmnr/opentelemetry_lib/tracing/processor.py +6 -7
  30. lmnr/opentelemetry_lib/tracing/tracer.py +29 -0
  31. lmnr/opentelemetry_lib/utils/package_check.py +9 -0
  32. lmnr/sdk/browser/browser_use_otel.py +9 -7
  33. lmnr/sdk/browser/patchright_otel.py +14 -26
  34. lmnr/sdk/browser/playwright_otel.py +72 -73
  35. lmnr/sdk/browser/pw_utils.py +436 -119
  36. lmnr/sdk/client/asynchronous/resources/browser_events.py +1 -0
  37. lmnr/sdk/decorators.py +39 -4
  38. lmnr/sdk/evaluations.py +23 -9
  39. lmnr/sdk/laminar.py +181 -209
  40. lmnr/sdk/types.py +0 -6
  41. lmnr/version.py +1 -1
  42. {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/METADATA +10 -8
  43. {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/RECORD +45 -29
  44. {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/WHEEL +1 -1
  45. lmnr/opentelemetry_lib/tracing/context_properties.py +0 -65
  46. {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/entry_points.txt +0 -0
lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py (new file, +678 -0)
@@ -0,0 +1,678 @@
+"""OpenTelemetry Anthropic instrumentation"""
+
+import logging
+import time
+from typing import Callable, Collection, Optional
+
+from opentelemetry import context as context_api
+from opentelemetry._events import EventLogger, get_event_logger
+from .config import Config
+from .event_emitter import (
+    emit_input_events,
+    emit_response_events,
+)
+from .span_utils import (
+    aset_input_attributes,
+    set_response_attributes,
+)
+from .streaming import (
+    abuild_from_streaming_response,
+    build_from_streaming_response,
+)
+from .utils import (
+    acount_prompt_tokens_from_request,
+    count_prompt_tokens_from_request,
+    dont_throw,
+    error_metrics_attributes,
+    run_async,
+    set_span_attribute,
+    shared_metrics_attributes,
+    should_emit_events,
+)
+from .version import __version__
+
+from lmnr.opentelemetry_lib.tracing.context import get_current_context
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap
+from opentelemetry.metrics import Counter, Histogram, Meter, get_meter
+from opentelemetry.semconv_ai import (
+    SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
+    LLMRequestTypeValues,
+    Meters,
+    SpanAttributes,
+)
+from opentelemetry.trace import Span, SpanKind, Tracer, get_tracer
+from opentelemetry.trace.status import Status, StatusCode
+from typing_extensions import Coroutine
+from wrapt import wrap_function_wrapper
+
+from anthropic._streaming import AsyncStream, Stream
+
+logger = logging.getLogger(__name__)
+
+_instruments = ("anthropic >= 0.3.11",)
+
+WRAPPED_METHODS = [
+    {
+        "package": "anthropic.resources.completions",
+        "object": "Completions",
+        "method": "create",
+        "span_name": "anthropic.completion",
+    },
+    {
+        "package": "anthropic.resources.messages",
+        "object": "Messages",
+        "method": "create",
+        "span_name": "anthropic.chat",
+    },
+    {
+        "package": "anthropic.resources.messages",
+        "object": "Messages",
+        "method": "stream",
+        "span_name": "anthropic.chat",
+    },
+]
+
+WRAPPED_AMETHODS = [
+    {
+        "package": "anthropic.resources.completions",
+        "object": "AsyncCompletions",
+        "method": "create",
+        "span_name": "anthropic.completion",
+    },
+    {
+        "package": "anthropic.resources.messages",
+        "object": "AsyncMessages",
+        "method": "create",
+        "span_name": "anthropic.chat",
+    },
+    {
+        "package": "anthropic.resources.messages",
+        "object": "AsyncMessages",
+        "method": "stream",
+        "span_name": "anthropic.chat",
+    },
+]
+
+
+def is_streaming_response(response):
+    return isinstance(response, Stream) or isinstance(response, AsyncStream)
+
+
+@dont_throw
+async def _aset_token_usage(
+    span,
+    anthropic,
+    request,
+    response,
+    metric_attributes: dict = {},
+    token_histogram: Histogram = None,
+    choice_counter: Counter = None,
+):
+    if not isinstance(response, dict):
+        response = response.__dict__
+
+    if usage := response.get("usage"):
+        prompt_tokens = usage.input_tokens
+        cache_read_tokens = dict(usage).get("cache_read_input_tokens", 0) or 0
+        cache_creation_tokens = dict(usage).get("cache_creation_input_tokens", 0) or 0
+    else:
+        prompt_tokens = await acount_prompt_tokens_from_request(anthropic, request)
+        cache_read_tokens = 0
+        cache_creation_tokens = 0
+
+    input_tokens = prompt_tokens + cache_read_tokens + cache_creation_tokens
+
+    if token_histogram and isinstance(input_tokens, int) and input_tokens >= 0:
+        token_histogram.record(
+            input_tokens,
+            attributes={
+                **metric_attributes,
+                SpanAttributes.LLM_TOKEN_TYPE: "input",
+            },
+        )
+
+    if usage := response.get("usage"):
+        completion_tokens = usage.output_tokens
+    else:
+        completion_tokens = 0
+        if hasattr(anthropic, "count_tokens"):
+            if response.get("completion"):
+                completion_tokens = await anthropic.count_tokens(
+                    response.get("completion")
+                )
+            elif response.get("content"):
+                completion_tokens = await anthropic.count_tokens(
+                    response.get("content")[0].text
+                )
+
+    if (
+        token_histogram
+        and isinstance(completion_tokens, int)
+        and completion_tokens >= 0
+    ):
+        token_histogram.record(
+            completion_tokens,
+            attributes={
+                **metric_attributes,
+                SpanAttributes.LLM_TOKEN_TYPE: "output",
+            },
+        )
+
+    total_tokens = input_tokens + completion_tokens
+
+    choices = 0
+    if isinstance(response.get("content"), list):
+        choices = len(response.get("content"))
+    elif response.get("completion"):
+        choices = 1
+
+    if choices > 0 and choice_counter:
+        choice_counter.add(
+            choices,
+            attributes={
+                **metric_attributes,
+                SpanAttributes.LLM_RESPONSE_STOP_REASON: response.get("stop_reason"),
+            },
+        )
+
+    set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, input_tokens)
+    set_span_attribute(
+        span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens
+    )
+    set_span_attribute(span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens)
+
+    set_span_attribute(
+        span, SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens
+    )
+    set_span_attribute(
+        span,
+        SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS,
+        cache_creation_tokens,
+    )
+
+
+@dont_throw
+def _set_token_usage(
+    span,
+    anthropic,
+    request,
+    response,
+    metric_attributes: dict = {},
+    token_histogram: Histogram = None,
+    choice_counter: Counter = None,
+):
+    if not isinstance(response, dict):
+        response = response.__dict__
+
+    if usage := response.get("usage"):
+        prompt_tokens = usage.input_tokens
+        cache_read_tokens = dict(usage).get("cache_read_input_tokens", 0) or 0
+        cache_creation_tokens = dict(usage).get("cache_creation_input_tokens", 0) or 0
+    else:
+        prompt_tokens = count_prompt_tokens_from_request(anthropic, request)
+        cache_read_tokens = 0
+        cache_creation_tokens = 0
+
+    input_tokens = prompt_tokens + cache_read_tokens + cache_creation_tokens
+
+    if token_histogram and isinstance(input_tokens, int) and input_tokens >= 0:
+        token_histogram.record(
+            input_tokens,
+            attributes={
+                **metric_attributes,
+                SpanAttributes.LLM_TOKEN_TYPE: "input",
+            },
+        )
+
+    if usage := response.get("usage"):
+        completion_tokens = usage.output_tokens
+    else:
+        completion_tokens = 0
+        if hasattr(anthropic, "count_tokens"):
+            if response.get("completion"):
+                completion_tokens = anthropic.count_tokens(response.get("completion"))
+            elif response.get("content"):
+                completion_tokens = anthropic.count_tokens(
+                    response.get("content")[0].text
+                )
+
+    if (
+        token_histogram
+        and isinstance(completion_tokens, int)
+        and completion_tokens >= 0
+    ):
+        token_histogram.record(
+            completion_tokens,
+            attributes={
+                **metric_attributes,
+                SpanAttributes.LLM_TOKEN_TYPE: "output",
+            },
+        )
+
+    total_tokens = input_tokens + completion_tokens
+
+    choices = 0
+    if isinstance(response.get("content"), list):
+        choices = len(response.get("content"))
+    elif response.get("completion"):
+        choices = 1
+
+    if choices > 0 and choice_counter:
+        choice_counter.add(
+            choices,
+            attributes={
+                **metric_attributes,
+                SpanAttributes.LLM_RESPONSE_STOP_REASON: response.get("stop_reason"),
+            },
+        )
+
+    set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, input_tokens)
+    set_span_attribute(
+        span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens
+    )
+    set_span_attribute(span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens)
+
+    set_span_attribute(
+        span, SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens
+    )
+    set_span_attribute(
+        span,
+        SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS,
+        cache_creation_tokens,
+    )
+
+
+def _with_chat_telemetry_wrapper(func):
+    """Helper for providing tracer for wrapper functions. Includes metric collectors."""
+
+    def _with_chat_telemetry(
+        tracer,
+        token_histogram,
+        choice_counter,
+        duration_histogram,
+        exception_counter,
+        event_logger,
+        to_wrap,
+    ):
+        def wrapper(wrapped, instance, args, kwargs):
+            return func(
+                tracer,
+                token_histogram,
+                choice_counter,
+                duration_histogram,
+                exception_counter,
+                event_logger,
+                to_wrap,
+                wrapped,
+                instance,
+                args,
+                kwargs,
+            )
+
+        return wrapper
+
+    return _with_chat_telemetry
+
+
+def _create_metrics(meter: Meter):
+    token_histogram = meter.create_histogram(
+        name=Meters.LLM_TOKEN_USAGE,
+        unit="token",
+        description="Measures number of input and output tokens used",
+    )
+
+    choice_counter = meter.create_counter(
+        name=Meters.LLM_GENERATION_CHOICES,
+        unit="choice",
+        description="Number of choices returned by chat completions call",
+    )
+
+    duration_histogram = meter.create_histogram(
+        name=Meters.LLM_OPERATION_DURATION,
+        unit="s",
+        description="GenAI operation duration",
+    )
+
+    exception_counter = meter.create_counter(
+        name=Meters.LLM_ANTHROPIC_COMPLETION_EXCEPTIONS,
+        unit="time",
+        description="Number of exceptions occurred during chat completions",
+    )
+
+    return token_histogram, choice_counter, duration_histogram, exception_counter
+
+
+@dont_throw
+def _handle_input(span: Span, event_logger: Optional[EventLogger], kwargs):
+    if should_emit_events() and event_logger:
+        emit_input_events(event_logger, kwargs)
+    else:
+        if not span.is_recording():
+            return
+        run_async(aset_input_attributes(span, kwargs))
+
+
+@dont_throw
+async def _ahandle_input(span: Span, event_logger: Optional[EventLogger], kwargs):
+    if should_emit_events() and event_logger:
+        emit_input_events(event_logger, kwargs)
+    else:
+        if not span.is_recording():
+            return
+        await aset_input_attributes(span, kwargs)
+
+
+@dont_throw
+def _handle_response(span: Span, event_logger: Optional[EventLogger], response):
+    if should_emit_events():
+        emit_response_events(event_logger, response)
+    else:
+        if not span.is_recording():
+            return
+        set_response_attributes(span, response)
+
+
+@_with_chat_telemetry_wrapper
+def _wrap(
+    tracer: Tracer,
+    token_histogram: Histogram,
+    choice_counter: Counter,
+    duration_histogram: Histogram,
+    exception_counter: Counter,
+    event_logger: Optional[EventLogger],
+    to_wrap,
+    wrapped,
+    instance,
+    args,
+    kwargs,
+):
+    """Instruments and calls every function defined in TO_WRAP."""
+    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+    ):
+        return wrapped(*args, **kwargs)
+
+    name = to_wrap.get("span_name")
+    span = tracer.start_span(
+        name,
+        kind=SpanKind.CLIENT,
+        attributes={
+            SpanAttributes.LLM_SYSTEM: "anthropic",
+            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
+        },
+        context=get_current_context(),
+    )
+
+    _handle_input(span, event_logger, kwargs)
+
+    start_time = time.time()
+    try:
+        response = wrapped(*args, **kwargs)
+    except Exception as e:  # pylint: disable=broad-except
+        end_time = time.time()
+        attributes = error_metrics_attributes(e)
+
+        if duration_histogram:
+            duration = end_time - start_time
+            duration_histogram.record(duration, attributes=attributes)
+
+        if exception_counter:
+            exception_counter.add(1, attributes=attributes)
+
+        raise e
+
+    end_time = time.time()
+
+    if is_streaming_response(response):
+        return build_from_streaming_response(
+            span,
+            response,
+            instance._client,
+            start_time,
+            token_histogram,
+            choice_counter,
+            duration_histogram,
+            exception_counter,
+            event_logger,
+            kwargs,
+        )
+    elif response:
+        try:
+            metric_attributes = shared_metrics_attributes(response)
+
+            if duration_histogram:
+                duration = time.time() - start_time
+                duration_histogram.record(
+                    duration,
+                    attributes=metric_attributes,
+                )
+
+            _handle_response(span, event_logger, response)
+            if span.is_recording():
+                _set_token_usage(
+                    span,
+                    instance._client,
+                    kwargs,
+                    response,
+                    metric_attributes,
+                    token_histogram,
+                    choice_counter,
+                )
+        except Exception as ex:  # pylint: disable=broad-except
+            logger.warning(
+                "Failed to set response attributes for anthropic span, error: %s",
+                str(ex),
+            )
+
+    if span.is_recording():
+        span.set_status(Status(StatusCode.OK))
+    span.end()
+    return response
+
+
+@_with_chat_telemetry_wrapper
+async def _awrap(
+    tracer,
+    token_histogram: Histogram,
+    choice_counter: Counter,
+    duration_histogram: Histogram,
+    exception_counter: Counter,
+    event_logger: Optional[EventLogger],
+    to_wrap,
+    wrapped,
+    instance,
+    args,
+    kwargs,
+):
+    """Instruments and calls every function defined in TO_WRAP."""
+    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+    ):
+        return await wrapped(*args, **kwargs)
+
+    name = to_wrap.get("span_name")
+    span = tracer.start_span(
+        name,
+        kind=SpanKind.CLIENT,
+        attributes={
+            SpanAttributes.LLM_SYSTEM: "anthropic",
+            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
+        },
+        context=get_current_context(),
+    )
+    await _ahandle_input(span, event_logger, kwargs)
+
+    start_time = time.time()
+    try:
+        response = await wrapped(*args, **kwargs)
+    except Exception as e:  # pylint: disable=broad-except
+        end_time = time.time()
+        attributes = error_metrics_attributes(e)
+
+        if duration_histogram:
+            duration = end_time - start_time
+            duration_histogram.record(duration, attributes=attributes)
+
+        if exception_counter:
+            exception_counter.add(1, attributes=attributes)
+
+        raise e
+
+    if is_streaming_response(response):
+        return abuild_from_streaming_response(
+            span,
+            response,
+            instance._client,
+            start_time,
+            token_histogram,
+            choice_counter,
+            duration_histogram,
+            exception_counter,
+            event_logger,
+            kwargs,
+        )
+    elif response:
+        metric_attributes = shared_metrics_attributes(response)
+
+        if duration_histogram:
+            duration = time.time() - start_time
+            duration_histogram.record(
+                duration,
+                attributes=metric_attributes,
+            )
+
+        _handle_response(span, event_logger, response)
+
+        if span.is_recording():
+            await _aset_token_usage(
+                span,
+                instance._client,
+                kwargs,
+                response,
+                metric_attributes,
+                token_histogram,
+                choice_counter,
+            )
+    span.set_status(Status(StatusCode.OK))
+    span.end()
+    return response
+
+
+def is_metrics_enabled() -> bool:
+    return False
+
+
+class AnthropicInstrumentor(BaseInstrumentor):
+    """An instrumentor for Anthropic's client library."""
+
+    def __init__(
+        self,
+        enrich_token_usage: bool = False,
+        exception_logger=None,
+        use_legacy_attributes: bool = True,
+        get_common_metrics_attributes: Callable[[], dict] = lambda: {},
+        upload_base64_image: Optional[
+            Callable[[str, str, str, str], Coroutine[None, None, str]]
+        ] = None,
+    ):
+        super().__init__()
+        Config.exception_logger = exception_logger
+        Config.enrich_token_usage = enrich_token_usage
+        Config.get_common_metrics_attributes = get_common_metrics_attributes
+        Config.upload_base64_image = upload_base64_image
+        Config.use_legacy_attributes = use_legacy_attributes
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        tracer_provider = kwargs.get("tracer_provider")
+        tracer = get_tracer(__name__, __version__, tracer_provider)
+
+        # meter and counters are inited here
+        meter_provider = kwargs.get("meter_provider")
+        meter = get_meter(__name__, __version__, meter_provider)
+
+        if is_metrics_enabled():
+            (
+                token_histogram,
+                choice_counter,
+                duration_histogram,
+                exception_counter,
+            ) = _create_metrics(meter)
+        else:
+            (
+                token_histogram,
+                choice_counter,
+                duration_histogram,
+                exception_counter,
+            ) = (None, None, None, None)
+
+        # event_logger is inited here
+        event_logger = None
+
+        if not Config.use_legacy_attributes:
+            event_logger_provider = kwargs.get("event_logger_provider")
+            event_logger = get_event_logger(
+                __name__, __version__, event_logger_provider=event_logger_provider
+            )
+
+        for wrapped_method in WRAPPED_METHODS:
+            wrap_package = wrapped_method.get("package")
+            wrap_object = wrapped_method.get("object")
+            wrap_method = wrapped_method.get("method")
+
+            try:
+                wrap_function_wrapper(
+                    wrap_package,
+                    f"{wrap_object}.{wrap_method}",
+                    _wrap(
+                        tracer,
+                        token_histogram,
+                        choice_counter,
+                        duration_histogram,
+                        exception_counter,
+                        event_logger,
+                        wrapped_method,
+                    ),
+                )
+            except ModuleNotFoundError:
+                pass  # that's ok, we don't want to fail if some methods do not exist
+
+        for wrapped_method in WRAPPED_AMETHODS:
+            wrap_package = wrapped_method.get("package")
+            wrap_object = wrapped_method.get("object")
+            wrap_method = wrapped_method.get("method")
+            try:
+                wrap_function_wrapper(
+                    wrap_package,
+                    f"{wrap_object}.{wrap_method}",
+                    _awrap(
+                        tracer,
+                        token_histogram,
+                        choice_counter,
+                        duration_histogram,
+                        exception_counter,
+                        event_logger,
+                        wrapped_method,
+                    ),
+                )
+            except ModuleNotFoundError:
+                pass  # that's ok, we don't want to fail if some methods do not exist
+
+    def _uninstrument(self, **kwargs):
+        for wrapped_method in WRAPPED_METHODS:
+            wrap_package = wrapped_method.get("package")
+            wrap_object = wrapped_method.get("object")
+            unwrap(
+                f"{wrap_package}.{wrap_object}",
+                wrapped_method.get("method"),
+            )
+        for wrapped_method in WRAPPED_AMETHODS:
+            wrap_package = wrapped_method.get("package")
+            wrap_object = wrapped_method.get("object")
+            unwrap(
+                f"{wrap_package}.{wrap_object}",
+                wrapped_method.get("method"),
+            )
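
A minimal usage sketch for the new module, since AnthropicInstrumentor follows the standard OpenTelemetry BaseInstrumentor API (instrument()/uninstrument()). This is commentary, not part of the diff; the model name and the assumption that ANTHROPIC_API_KEY is set in the environment are illustrative, not taken from the package.

# Usage sketch (assumptions: ANTHROPIC_API_KEY is set; the model name is
# only an example). instrument() patches the Completions/Messages create
# and stream methods listed in WRAPPED_METHODS/WRAPPED_AMETHODS via wrapt;
# with no explicit providers it falls back to the global tracer and meter.
from anthropic import Anthropic
from lmnr.opentelemetry_lib.opentelemetry.instrumentation.anthropic import (
    AnthropicInstrumentor,
)

AnthropicInstrumentor(enrich_token_usage=True).instrument()

client = Anthropic()
message = client.messages.create(
    model="claude-3-5-sonnet-latest",
    max_tokens=64,
    messages=[{"role": "user", "content": "Hello"}],
)
print(message.content[0].text)  # the call above is traced as "anthropic.chat"
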
lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py (new file, +13 -0)
@@ -0,0 +1,13 @@
+from typing import Callable, Optional
+
+from typing_extensions import Coroutine
+
+
+class Config:
+    enrich_token_usage = False
+    exception_logger = None
+    get_common_metrics_attributes: Callable[[], dict] = lambda: {}
+    upload_base64_image: Optional[
+        Callable[[str, str, str, str], Coroutine[None, None, str]]
+    ] = None
+    use_legacy_attributes = True
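
Config here is process-global state: AnthropicInstrumentor.__init__ mirrors each constructor argument onto these class attributes, which the sibling modules (span_utils, event_emitter, utils) then read. A hedged sketch of the one switch visible in this diff, assuming nothing beyond the code shown above:

# Sketch: with use_legacy_attributes=False, _instrument() creates an
# EventLogger, so prompt/response data is emitted as events rather than
# legacy span attributes (see _handle_input/_handle_response in __init__.py).
from lmnr.opentelemetry_lib.opentelemetry.instrumentation.anthropic import (
    AnthropicInstrumentor,
)
from lmnr.opentelemetry_lib.opentelemetry.instrumentation.anthropic.config import (
    Config,
)

AnthropicInstrumentor(use_legacy_attributes=False).instrument()
assert Config.use_legacy_attributes is False  # constructor wrote through to Config
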