lmnr 0.6.16__py3-none-any.whl → 0.7.26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lmnr/__init__.py +6 -15
- lmnr/cli/__init__.py +270 -0
- lmnr/cli/datasets.py +371 -0
- lmnr/{cli.py → cli/evals.py} +20 -102
- lmnr/cli/rules.py +42 -0
- lmnr/opentelemetry_lib/__init__.py +9 -2
- lmnr/opentelemetry_lib/decorators/__init__.py +274 -168
- lmnr/opentelemetry_lib/litellm/__init__.py +352 -38
- lmnr/opentelemetry_lib/litellm/utils.py +82 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +849 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +401 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +425 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +332 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/__init__.py +451 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/proxy.py +144 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_agent/__init__.py +100 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/__init__.py +476 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py +12 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +191 -129
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +26 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +126 -41
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/__init__.py +381 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/utils.py +36 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +16 -16
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +61 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +472 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +1185 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +305 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +16 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +312 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +68 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +197 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +176 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +368 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +325 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +135 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +786 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openhands_ai/__init__.py +388 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/opentelemetry/__init__.py +69 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/skyvern/__init__.py +59 -61
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +197 -0
- lmnr/opentelemetry_lib/tracing/__init__.py +119 -18
- lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +124 -25
- lmnr/opentelemetry_lib/tracing/attributes.py +4 -0
- lmnr/opentelemetry_lib/tracing/context.py +200 -0
- lmnr/opentelemetry_lib/tracing/exporter.py +109 -15
- lmnr/opentelemetry_lib/tracing/instruments.py +22 -5
- lmnr/opentelemetry_lib/tracing/processor.py +128 -30
- lmnr/opentelemetry_lib/tracing/span.py +398 -0
- lmnr/opentelemetry_lib/tracing/tracer.py +40 -1
- lmnr/opentelemetry_lib/tracing/utils.py +62 -0
- lmnr/opentelemetry_lib/utils/package_check.py +9 -0
- lmnr/opentelemetry_lib/utils/wrappers.py +11 -0
- lmnr/sdk/browser/background_send_events.py +158 -0
- lmnr/sdk/browser/browser_use_cdp_otel.py +100 -0
- lmnr/sdk/browser/browser_use_otel.py +12 -12
- lmnr/sdk/browser/bubus_otel.py +71 -0
- lmnr/sdk/browser/cdp_utils.py +518 -0
- lmnr/sdk/browser/inject_script.js +514 -0
- lmnr/sdk/browser/patchright_otel.py +18 -44
- lmnr/sdk/browser/playwright_otel.py +104 -187
- lmnr/sdk/browser/pw_utils.py +249 -210
- lmnr/sdk/browser/recorder/record.umd.min.cjs +84 -0
- lmnr/sdk/browser/utils.py +1 -1
- lmnr/sdk/client/asynchronous/async_client.py +47 -15
- lmnr/sdk/client/asynchronous/resources/__init__.py +2 -7
- lmnr/sdk/client/asynchronous/resources/browser_events.py +1 -0
- lmnr/sdk/client/asynchronous/resources/datasets.py +131 -0
- lmnr/sdk/client/asynchronous/resources/evals.py +122 -18
- lmnr/sdk/client/asynchronous/resources/evaluators.py +85 -0
- lmnr/sdk/client/asynchronous/resources/tags.py +4 -10
- lmnr/sdk/client/synchronous/resources/__init__.py +2 -2
- lmnr/sdk/client/synchronous/resources/datasets.py +131 -0
- lmnr/sdk/client/synchronous/resources/evals.py +83 -17
- lmnr/sdk/client/synchronous/resources/evaluators.py +85 -0
- lmnr/sdk/client/synchronous/resources/tags.py +4 -10
- lmnr/sdk/client/synchronous/sync_client.py +47 -15
- lmnr/sdk/datasets/__init__.py +94 -0
- lmnr/sdk/datasets/file_utils.py +91 -0
- lmnr/sdk/decorators.py +103 -23
- lmnr/sdk/evaluations.py +122 -33
- lmnr/sdk/laminar.py +816 -333
- lmnr/sdk/log.py +7 -2
- lmnr/sdk/types.py +124 -143
- lmnr/sdk/utils.py +115 -2
- lmnr/version.py +1 -1
- {lmnr-0.6.16.dist-info → lmnr-0.7.26.dist-info}/METADATA +71 -78
- lmnr-0.7.26.dist-info/RECORD +116 -0
- lmnr-0.7.26.dist-info/WHEEL +4 -0
- lmnr-0.7.26.dist-info/entry_points.txt +3 -0
- lmnr/opentelemetry_lib/tracing/context_properties.py +0 -65
- lmnr/sdk/browser/rrweb/rrweb.umd.min.cjs +0 -98
- lmnr/sdk/client/asynchronous/resources/agent.py +0 -329
- lmnr/sdk/client/synchronous/resources/agent.py +0 -323
- lmnr/sdk/datasets.py +0 -60
- lmnr-0.6.16.dist-info/LICENSE +0 -75
- lmnr-0.6.16.dist-info/RECORD +0 -61
- lmnr-0.6.16.dist-info/WHEEL +0 -4
- lmnr-0.6.16.dist-info/entry_points.txt +0 -3
|
@@ -0,0 +1,849 @@
|
|
|
1
|
+
"""OpenTelemetry Anthropic instrumentation"""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import time
|
|
5
|
+
from typing import Callable, Collection, Optional
|
|
6
|
+
|
|
7
|
+
from opentelemetry import context as context_api
|
|
8
|
+
from opentelemetry._events import EventLogger, get_event_logger
|
|
9
|
+
from .config import Config
|
|
10
|
+
from .event_emitter import (
|
|
11
|
+
emit_input_events,
|
|
12
|
+
emit_response_events,
|
|
13
|
+
)
|
|
14
|
+
from .span_utils import (
|
|
15
|
+
aset_input_attributes,
|
|
16
|
+
aset_response_attributes,
|
|
17
|
+
set_response_attributes,
|
|
18
|
+
)
|
|
19
|
+
from .streaming import (
|
|
20
|
+
abuild_from_streaming_response,
|
|
21
|
+
build_from_streaming_response,
|
|
22
|
+
)
|
|
23
|
+
from .utils import (
|
|
24
|
+
acount_prompt_tokens_from_request,
|
|
25
|
+
ashared_metrics_attributes,
|
|
26
|
+
count_prompt_tokens_from_request,
|
|
27
|
+
dont_throw,
|
|
28
|
+
error_metrics_attributes,
|
|
29
|
+
run_async,
|
|
30
|
+
set_span_attribute,
|
|
31
|
+
shared_metrics_attributes,
|
|
32
|
+
should_emit_events,
|
|
33
|
+
)
|
|
34
|
+
from .streaming import (
|
|
35
|
+
WrappedAsyncMessageStreamManager,
|
|
36
|
+
WrappedMessageStreamManager,
|
|
37
|
+
)
|
|
38
|
+
from .version import __version__
|
|
39
|
+
|
|
40
|
+
from lmnr.opentelemetry_lib.tracing.context import get_current_context
|
|
41
|
+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
|
42
|
+
from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap
|
|
43
|
+
from opentelemetry.metrics import Counter, Histogram, Meter, get_meter
|
|
44
|
+
from opentelemetry.semconv_ai import (
|
|
45
|
+
SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
|
|
46
|
+
LLMRequestTypeValues,
|
|
47
|
+
Meters,
|
|
48
|
+
SpanAttributes,
|
|
49
|
+
)
|
|
50
|
+
from opentelemetry.trace import Span, SpanKind, Tracer, get_tracer
|
|
51
|
+
from opentelemetry.trace.status import Status, StatusCode
|
|
52
|
+
from typing_extensions import Coroutine
|
|
53
|
+
from wrapt import wrap_function_wrapper
|
|
54
|
+
|
|
55
|
+
from anthropic._streaming import AsyncStream, Stream
|
|
56
|
+
|
|
57
|
+
# Module-level logger for this instrumentation package.
logger = logging.getLogger(__name__)

# Package requirement consumed by instrumentation_dependencies(); the
# instrumentor only activates when a compatible anthropic SDK is installed.
_instruments = ("anthropic >= 0.3.11",)
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
# Sync-wrapped entry points. Each entry names the module ("package"), the
# class on it ("object"), and the method to patch; "span_name" is the OTel
# span name recorded around the call.
WRAPPED_METHODS = [
    {
        "package": "anthropic.resources.completions",
        "object": "Completions",
        "method": "create",
        "span_name": "anthropic.completion",
    },
    {
        "package": "anthropic.resources.messages",
        "object": "Messages",
        "method": "create",
        "span_name": "anthropic.chat",
    },
    {
        "package": "anthropic.resources.messages",
        "object": "Messages",
        "method": "stream",
        "span_name": "anthropic.chat",
    },
    # This method is on an async resource, but is meant to be called as
    # an async context manager (async with), which we don't need to await;
    # thus, we wrap it with a sync wrapper
    {
        "package": "anthropic.resources.messages",
        "object": "AsyncMessages",
        "method": "stream",
        "span_name": "anthropic.chat",
    },
    # Beta API methods (regular Anthropic SDK)
    {
        "package": "anthropic.resources.beta.messages.messages",
        "object": "Messages",
        "method": "create",
        "span_name": "anthropic.chat",
    },
    {
        "package": "anthropic.resources.beta.messages.messages",
        "object": "Messages",
        "method": "stream",
        "span_name": "anthropic.chat",
    },
    # read note on async with above
    {
        "package": "anthropic.resources.beta.messages.messages",
        "object": "AsyncMessages",
        "method": "stream",
        "span_name": "anthropic.chat",
    },
    # Beta API methods (Bedrock SDK)
    {
        "package": "anthropic.lib.bedrock._beta_messages",
        "object": "Messages",
        "method": "create",
        "span_name": "anthropic.chat",
    },
    {
        "package": "anthropic.lib.bedrock._beta_messages",
        "object": "Messages",
        "method": "stream",
        "span_name": "anthropic.chat",
    },
    # read note on async with above
    {
        "package": "anthropic.lib.bedrock._beta_messages",
        "object": "AsyncMessages",
        "method": "stream",
        "span_name": "anthropic.chat",
    },
]
|
|
131
|
+
|
|
132
|
+
# Async-wrapped entry points (awaited coroutine methods). Same schema as
# WRAPPED_METHODS; these are wrapped with the async wrapper (_awrap).
WRAPPED_AMETHODS = [
    {
        "package": "anthropic.resources.completions",
        "object": "AsyncCompletions",
        "method": "create",
        "span_name": "anthropic.completion",
    },
    {
        "package": "anthropic.resources.messages",
        "object": "AsyncMessages",
        "method": "create",
        "span_name": "anthropic.chat",
    },
    # Beta API async methods (regular Anthropic SDK)
    {
        "package": "anthropic.resources.beta.messages.messages",
        "object": "AsyncMessages",
        "method": "create",
        "span_name": "anthropic.chat",
    },
    # Beta API async methods (Bedrock SDK)
    {
        "package": "anthropic.lib.bedrock._beta_messages",
        "object": "AsyncMessages",
        "method": "create",
        "span_name": "anthropic.chat",
    },
]
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def is_streaming_response(response):
    """Return True if *response* is an Anthropic sync or async stream.

    Used to decide whether the span must stay open and be handed off to the
    streaming accumulators instead of being closed immediately.
    """
    # Idiom: a single isinstance with a tuple covers both stream flavors.
    return isinstance(response, (Stream, AsyncStream))
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
def is_stream_manager(response):
    """Return True when *response* is an Anthropic (Async)MessageStreamManager.

    Prefers a real isinstance check; when the private anthropic module layout
    is unavailable, falls back to matching the class name.
    """
    try:
        from anthropic.lib.streaming._messages import (
            MessageStreamManager,
            AsyncMessageStreamManager,
        )

        return isinstance(response, (MessageStreamManager, AsyncMessageStreamManager))
    except ImportError:
        # Fallback: duck-type on the class name when the import path changed.
        return response.__class__.__name__ in (
            "MessageStreamManager",
            "AsyncMessageStreamManager",
        )
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
@dont_throw
async def _aset_token_usage(
    span,
    anthropic,
    request,
    response,
    metric_attributes: Optional[dict] = None,
    token_histogram: Optional[Histogram] = None,
    choice_counter: Optional[Counter] = None,
):
    """Record token-usage span attributes and metrics for a response (async).

    Reads counts from ``response.usage`` when present; otherwise falls back to
    counting prompt tokens from the request via the client. Also records the
    number of returned choices and cache read/creation token counts.

    Args:
        span: OTel span to annotate.
        anthropic: Anthropic client, used for fallback token counting.
        request: original request kwargs.
        response: SDK response (possibly a with_raw_response wrapper).
        metric_attributes: extra attributes merged into each metric record.
        token_histogram: optional token-count histogram.
        choice_counter: optional counter of returned choices.
    """
    # Fix: avoid the shared mutable-default pitfall; None means "no extras".
    metric_attributes = metric_attributes or {}

    # Handle with_raw_response wrapped responses first
    if response and hasattr(response, "parse") and callable(response.parse):
        try:
            response = response.parse()
        except Exception as e:
            logger.debug(f"Failed to parse with_raw_response: {e}")
            return

    usage = getattr(response, "usage", None) if response else None

    if usage:
        prompt_tokens = getattr(usage, "input_tokens", 0)
        cache_read_tokens = getattr(usage, "cache_read_input_tokens", 0) or 0
        cache_creation_tokens = getattr(usage, "cache_creation_input_tokens", 0) or 0
    else:
        prompt_tokens = await acount_prompt_tokens_from_request(anthropic, request)
        cache_read_tokens = 0
        cache_creation_tokens = 0

    input_tokens = prompt_tokens + cache_read_tokens + cache_creation_tokens

    if token_histogram and isinstance(input_tokens, int) and input_tokens >= 0:
        token_histogram.record(
            input_tokens,
            attributes={
                **metric_attributes,
                SpanAttributes.LLM_TOKEN_TYPE: "input",
            },
        )

    if usage:
        completion_tokens = getattr(usage, "output_tokens", 0)
    else:
        completion_tokens = 0
        # Fallback: count output tokens with the client when it supports it.
        if hasattr(anthropic, "count_tokens"):
            completion_attr = getattr(response, "completion", None)
            content_attr = getattr(response, "content", None)
            if completion_attr:
                completion_tokens = await anthropic.count_tokens(completion_attr)
            elif content_attr:
                completion_tokens = await anthropic.count_tokens(content_attr[0].text)

    if (
        token_histogram
        and isinstance(completion_tokens, int)
        and completion_tokens >= 0
    ):
        token_histogram.record(
            completion_tokens,
            attributes={
                **metric_attributes,
                SpanAttributes.LLM_TOKEN_TYPE: "output",
            },
        )

    total_tokens = input_tokens + completion_tokens

    # Legacy completions expose a single "completion" string; messages expose
    # a list of content blocks — one choice per block.
    choices = 0
    content_attr = getattr(response, "content", None)
    completion_attr = getattr(response, "completion", None)
    if isinstance(content_attr, list):
        choices = len(content_attr)
    elif completion_attr:
        choices = 1

    if choices > 0 and choice_counter:
        choice_counter.add(
            choices,
            attributes={
                **metric_attributes,
                SpanAttributes.LLM_RESPONSE_STOP_REASON: getattr(
                    response, "stop_reason", None
                ),
            },
        )

    set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, input_tokens)
    set_span_attribute(
        span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens
    )
    set_span_attribute(span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens)

    set_span_attribute(
        span, SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens
    )
    set_span_attribute(
        span,
        SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS,
        cache_creation_tokens,
    )
|
|
283
|
+
|
|
284
|
+
|
|
285
|
+
@dont_throw
def _set_token_usage(
    span,
    anthropic,
    request,
    response,
    metric_attributes: Optional[dict] = None,
    token_histogram: Optional[Histogram] = None,
    choice_counter: Optional[Counter] = None,
):
    """Record token-usage span attributes and metrics for a response (sync).

    Sync counterpart of ``_aset_token_usage``: reads counts from
    ``response.usage`` when present, otherwise counts prompt tokens from the
    request via the client; records choice and cache token metrics as well.

    Args:
        span: OTel span to annotate.
        anthropic: Anthropic client, used for fallback token counting.
        request: original request kwargs.
        response: SDK response (possibly a with_raw_response wrapper).
        metric_attributes: extra attributes merged into each metric record.
        token_histogram: optional token-count histogram.
        choice_counter: optional counter of returned choices.
    """
    # Fix: avoid the shared mutable-default pitfall; None means "no extras".
    metric_attributes = metric_attributes or {}

    # Handle with_raw_response wrapped responses first
    if response and hasattr(response, "parse") and callable(response.parse):
        try:
            response = response.parse()
        except Exception as e:
            logger.debug(f"Failed to parse with_raw_response: {e}")
            return

    usage = getattr(response, "usage", None) if response else None

    if usage:
        prompt_tokens = getattr(usage, "input_tokens", 0)
        cache_read_tokens = getattr(usage, "cache_read_input_tokens", 0) or 0
        cache_creation_tokens = getattr(usage, "cache_creation_input_tokens", 0) or 0
    else:
        prompt_tokens = count_prompt_tokens_from_request(anthropic, request)
        cache_read_tokens = 0
        cache_creation_tokens = 0

    input_tokens = prompt_tokens + cache_read_tokens + cache_creation_tokens

    if token_histogram and isinstance(input_tokens, int) and input_tokens >= 0:
        token_histogram.record(
            input_tokens,
            attributes={
                **metric_attributes,
                SpanAttributes.LLM_TOKEN_TYPE: "input",
            },
        )

    if usage:
        completion_tokens = getattr(usage, "output_tokens", 0)
    else:
        completion_tokens = 0
        # Fallback: count output tokens with the client when it supports it.
        if hasattr(anthropic, "count_tokens"):
            completion_attr = getattr(response, "completion", None)
            content_attr = getattr(response, "content", None)
            if completion_attr:
                completion_tokens = anthropic.count_tokens(completion_attr)
            elif content_attr:
                completion_tokens = anthropic.count_tokens(content_attr[0].text)

    if (
        token_histogram
        and isinstance(completion_tokens, int)
        and completion_tokens >= 0
    ):
        token_histogram.record(
            completion_tokens,
            attributes={
                **metric_attributes,
                SpanAttributes.LLM_TOKEN_TYPE: "output",
            },
        )

    total_tokens = input_tokens + completion_tokens

    # Legacy completions expose a single "completion" string; messages expose
    # a list of content blocks — one choice per block.
    choices = 0
    content_attr = getattr(response, "content", None)
    completion_attr = getattr(response, "completion", None)
    if isinstance(content_attr, list):
        choices = len(content_attr)
    elif completion_attr:
        choices = 1

    if choices > 0 and choice_counter:
        choice_counter.add(
            choices,
            attributes={
                **metric_attributes,
                SpanAttributes.LLM_RESPONSE_STOP_REASON: getattr(
                    response, "stop_reason", None
                ),
            },
        )

    set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, input_tokens)
    set_span_attribute(
        span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens
    )
    set_span_attribute(span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, total_tokens)

    set_span_attribute(
        span, SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS, cache_read_tokens
    )
    set_span_attribute(
        span,
        SpanAttributes.LLM_USAGE_CACHE_CREATION_INPUT_TOKENS,
        cache_creation_tokens,
    )
|
|
383
|
+
|
|
384
|
+
|
|
385
|
+
def _with_chat_telemetry_wrapper(func):
|
|
386
|
+
"""Helper for providing tracer for wrapper functions. Includes metric collectors."""
|
|
387
|
+
|
|
388
|
+
def _with_chat_telemetry(
|
|
389
|
+
tracer,
|
|
390
|
+
token_histogram,
|
|
391
|
+
choice_counter,
|
|
392
|
+
duration_histogram,
|
|
393
|
+
exception_counter,
|
|
394
|
+
event_logger,
|
|
395
|
+
to_wrap,
|
|
396
|
+
):
|
|
397
|
+
def wrapper(wrapped, instance, args, kwargs):
|
|
398
|
+
return func(
|
|
399
|
+
tracer,
|
|
400
|
+
token_histogram,
|
|
401
|
+
choice_counter,
|
|
402
|
+
duration_histogram,
|
|
403
|
+
exception_counter,
|
|
404
|
+
event_logger,
|
|
405
|
+
to_wrap,
|
|
406
|
+
wrapped,
|
|
407
|
+
instance,
|
|
408
|
+
args,
|
|
409
|
+
kwargs,
|
|
410
|
+
)
|
|
411
|
+
|
|
412
|
+
return wrapper
|
|
413
|
+
|
|
414
|
+
return _with_chat_telemetry
|
|
415
|
+
|
|
416
|
+
|
|
417
|
+
def _create_metrics(meter: Meter):
    """Build the metric instruments used by this instrumentation.

    Returns a 4-tuple: (token histogram, choice counter, duration histogram,
    exception counter).
    """
    tokens = meter.create_histogram(
        name=Meters.LLM_TOKEN_USAGE,
        unit="token",
        description="Measures number of input and output tokens used",
    )
    choices = meter.create_counter(
        name=Meters.LLM_GENERATION_CHOICES,
        unit="choice",
        description="Number of choices returned by chat completions call",
    )
    duration = meter.create_histogram(
        name=Meters.LLM_OPERATION_DURATION,
        unit="s",
        description="GenAI operation duration",
    )
    exceptions = meter.create_counter(
        name=Meters.LLM_ANTHROPIC_COMPLETION_EXCEPTIONS,
        unit="time",
        description="Number of exceptions occurred during chat completions",
    )
    return tokens, choices, duration, exceptions
|
|
443
|
+
|
|
444
|
+
|
|
445
|
+
@dont_throw
def _handle_input(span: Span, event_logger: Optional[EventLogger], kwargs):
    """Capture request inputs, either as OTel events or as span attributes."""
    if should_emit_events() and event_logger:
        emit_input_events(event_logger, kwargs)
        return
    # Attribute path: skip entirely for non-recording spans.
    if span.is_recording():
        # Reuse the async attribute setter through the sync bridge.
        run_async(aset_input_attributes(span, kwargs))
|
|
453
|
+
|
|
454
|
+
|
|
455
|
+
@dont_throw
async def _ahandle_input(span: Span, event_logger: Optional[EventLogger], kwargs):
    """Async variant of _handle_input: events when enabled, else attributes."""
    if should_emit_events() and event_logger:
        emit_input_events(event_logger, kwargs)
        return
    # Attribute path: skip entirely for non-recording spans.
    if span.is_recording():
        await aset_input_attributes(span, kwargs)
|
|
463
|
+
|
|
464
|
+
|
|
465
|
+
@dont_throw
def _handle_response(span: Span, event_logger: Optional[EventLogger], response):
    """Record the model response, either as OTel events or span attributes.

    Consistency fix: mirrors ``_handle_input`` by requiring an actual
    ``event_logger`` before taking the event path; previously a None logger
    could be passed to ``emit_response_events``, losing the response data.
    """
    if should_emit_events() and event_logger:
        emit_response_events(event_logger, response)
    else:
        if not span.is_recording():
            return
        set_response_attributes(span, response)
|
|
473
|
+
|
|
474
|
+
|
|
475
|
+
@dont_throw
async def _ahandle_response(span: Span, event_logger: Optional[EventLogger], response):
    """Async variant of _handle_response.

    Consistency fix: mirrors ``_handle_input``/``_ahandle_input`` by requiring
    an actual ``event_logger`` before taking the event path; previously a None
    logger could be passed to ``emit_response_events``.
    """
    if should_emit_events() and event_logger:
        emit_response_events(event_logger, response)
    else:
        if not span.is_recording():
            return

        await aset_response_attributes(span, response)
|
|
484
|
+
|
|
485
|
+
|
|
486
|
+
@_with_chat_telemetry_wrapper
def _wrap(
    tracer: Tracer,
    token_histogram: Histogram,
    choice_counter: Counter,
    duration_histogram: Histogram,
    exception_counter: Counter,
    event_logger: Optional[EventLogger],
    to_wrap,
    wrapped,
    instance,
    args,
    kwargs,
):
    """Instruments and calls every function defined in TO_WRAP.

    Sync wrapper installed via wrapt: starts a CLIENT span, captures the
    request, invokes the wrapped SDK method, and records duration, exception
    and token metrics. Streaming results are handed off together with the
    still-open span to streaming wrappers, which close it later.
    """
    # Respect both the generic OTel suppression flag and the LLM-specific one.
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
    ):
        return wrapped(*args, **kwargs)

    name = to_wrap.get("span_name")
    # Span is started manually (no context manager) because streaming paths
    # return before it can be ended here.
    span = tracer.start_span(
        name,
        kind=SpanKind.CLIENT,
        attributes={
            SpanAttributes.LLM_SYSTEM: "anthropic",
            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
        },
        context=get_current_context(),
    )

    _handle_input(span, event_logger, kwargs)

    start_time = time.time()
    try:
        response = wrapped(*args, **kwargs)
    except Exception as e:  # pylint: disable=broad-except
        # Record failure metrics, then re-raise so caller behavior is unchanged.
        end_time = time.time()
        attributes = error_metrics_attributes(e)

        if duration_histogram:
            duration = end_time - start_time
            duration_histogram.record(duration, attributes=attributes)

        if exception_counter:
            exception_counter.add(1, attributes=attributes)

        raise e

    end_time = time.time()

    if is_streaming_response(response):
        # Hand the open span to the stream accumulator; it ends the span when
        # the stream is consumed.
        return build_from_streaming_response(
            span,
            response,
            instance._client,
            start_time,
            token_histogram,
            choice_counter,
            duration_histogram,
            exception_counter,
            event_logger,
            kwargs,
        )
    elif is_stream_manager(response):
        # Stream managers ((async) with-blocks) are wrapped so the span stays
        # open across the context manager's lifetime.
        if response.__class__.__name__ == "AsyncMessageStreamManager":
            return WrappedAsyncMessageStreamManager(
                response,
                span,
                instance._client,
                start_time,
                token_histogram,
                choice_counter,
                duration_histogram,
                exception_counter,
                event_logger,
                kwargs,
            )
        else:
            return WrappedMessageStreamManager(
                response,
                span,
                instance._client,
                start_time,
                token_histogram,
                choice_counter,
                duration_histogram,
                exception_counter,
                event_logger,
                kwargs,
            )
    elif response:
        # Non-streaming response: record duration, response data and token
        # usage; failures here must never break the user's call.
        try:
            metric_attributes = shared_metrics_attributes(response)

            if duration_histogram:
                duration = time.time() - start_time
                duration_histogram.record(
                    duration,
                    attributes=metric_attributes,
                )

            _handle_response(span, event_logger, response)
            if span.is_recording():
                _set_token_usage(
                    span,
                    instance._client,
                    kwargs,
                    response,
                    metric_attributes,
                    token_histogram,
                    choice_counter,
                )
        except Exception as ex:  # pylint: disable=broad-except
            logger.warning(
                "Failed to set response attributes for anthropic span, error: %s",
                str(ex),
            )

    if span.is_recording():
        span.set_status(Status(StatusCode.OK))
    span.end()
    return response
|
|
609
|
+
|
|
610
|
+
|
|
611
|
+
@_with_chat_telemetry_wrapper
async def _awrap(
    tracer,
    token_histogram: Histogram,
    choice_counter: Counter,
    duration_histogram: Histogram,
    exception_counter: Counter,
    event_logger: Optional[EventLogger],
    to_wrap,
    wrapped,
    instance,
    args,
    kwargs,
):
    """Instruments and calls every function defined in TO_WRAP.

    Async counterpart of ``_wrap``: awaits the wrapped SDK coroutine and
    records the same spans and metrics. Streaming results are handed off with
    the still-open span to streaming wrappers, which close it later.
    """
    # Respect both the generic OTel suppression flag and the LLM-specific one.
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
    ):
        return await wrapped(*args, **kwargs)

    name = to_wrap.get("span_name")
    span = tracer.start_span(
        name,
        kind=SpanKind.CLIENT,
        attributes={
            SpanAttributes.LLM_SYSTEM: "anthropic",
            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
        },
        context=get_current_context(),
    )
    await _ahandle_input(span, event_logger, kwargs)

    start_time = time.time()
    try:
        response = await wrapped(*args, **kwargs)
    except Exception as e:  # pylint: disable=broad-except
        # Record failure metrics, then re-raise so caller behavior is unchanged.
        end_time = time.time()
        attributes = error_metrics_attributes(e)

        if duration_histogram:
            duration = end_time - start_time
            duration_histogram.record(duration, attributes=attributes)

        if exception_counter:
            exception_counter.add(1, attributes=attributes)

        raise e

    if is_streaming_response(response):
        # Hand the open span to the async stream accumulator.
        return abuild_from_streaming_response(
            span,
            response,
            instance._client,
            start_time,
            token_histogram,
            choice_counter,
            duration_histogram,
            exception_counter,
            event_logger,
            kwargs,
        )
    elif is_stream_manager(response):
        if response.__class__.__name__ == "AsyncMessageStreamManager":
            return WrappedAsyncMessageStreamManager(
                response,
                span,
                instance._client,
                start_time,
                token_histogram,
                choice_counter,
                duration_histogram,
                exception_counter,
                event_logger,
                kwargs,
            )
        else:
            return WrappedMessageStreamManager(
                response,
                span,
                instance._client,
                start_time,
                token_histogram,
                choice_counter,
                duration_histogram,
                exception_counter,
                event_logger,
                kwargs,
            )
    elif response:
        # NOTE(review): unlike _wrap, this path has no try/except around the
        # metric/response handling and sets OK status unconditionally — the
        # @dont_throw decorator and the mirrored sync version suggest this is
        # tolerated, but confirm the asymmetry is intended.
        metric_attributes = await ashared_metrics_attributes(response)

        if duration_histogram:
            duration = time.time() - start_time
            duration_histogram.record(
                duration,
                attributes=metric_attributes,
            )

        await _ahandle_response(span, event_logger, response)

        if span.is_recording():
            await _aset_token_usage(
                span,
                instance._client,
                kwargs,
                response,
                metric_attributes,
                token_histogram,
                choice_counter,
            )
    span.set_status(Status(StatusCode.OK))
    span.end()
    return response
|
|
724
|
+
|
|
725
|
+
|
|
726
|
+
def is_metrics_enabled() -> bool:
    """Report whether OTel metrics should be emitted by this instrumentation.

    Metrics collection is intentionally disabled in this fork, so this
    always returns False; callers fall back to no-op (None) meters.
    """
    return False
|
|
728
|
+
|
|
729
|
+
|
|
730
|
+
class AnthropicInstrumentor(BaseInstrumentor):
    """An instrumentor for Anthropic's client library.

    Wraps the sync and async message-creation entry points listed in
    WRAPPED_METHODS / WRAPPED_AMETHODS with tracing (and, when enabled,
    metrics and event-logging) wrappers.
    """

    def __init__(
        self,
        enrich_token_usage: bool = False,
        exception_logger=None,
        use_legacy_attributes: bool = True,
        get_common_metrics_attributes: Callable[[], dict] = lambda: {},
        upload_base64_image: Optional[
            Callable[[str, str, str, str], Coroutine[None, None, str]]
        ] = None,
    ):
        """Store instrumentation options on the shared Config object.

        Args:
            enrich_token_usage: If True, compute token counts when the
                provider response does not include them.
            exception_logger: Optional callable invoked with exceptions
                raised inside the instrumentation itself.
            use_legacy_attributes: If True, record prompts/completions as
                span attributes; if False, emit OTel events instead.
            get_common_metrics_attributes: Factory returning attributes to
                attach to every recorded metric.
            upload_base64_image: Optional async hook used to offload
                base64-encoded images instead of storing them on spans.
        """
        super().__init__()
        # NOTE: Config is class-level shared state, so constructing a second
        # instrumentor with different options affects all instances.
        Config.exception_logger = exception_logger
        Config.enrich_token_usage = enrich_token_usage
        Config.get_common_metrics_attributes = get_common_metrics_attributes
        Config.upload_base64_image = upload_base64_image
        Config.use_legacy_attributes = use_legacy_attributes

    def instrumentation_dependencies(self) -> Collection[str]:
        """Return the package specs this instrumentation supports."""
        return _instruments

    def _instrument(self, **kwargs):
        """Wrap the Anthropic client methods with tracing wrappers.

        Accepts optional ``tracer_provider``, ``meter_provider`` and
        ``event_logger_provider`` entries in ``kwargs``.
        """
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, __version__, tracer_provider)

        # meter and counters are inited here
        meter_provider = kwargs.get("meter_provider")
        meter = get_meter(__name__, __version__, meter_provider)

        if is_metrics_enabled():
            (
                token_histogram,
                choice_counter,
                duration_histogram,
                exception_counter,
            ) = _create_metrics(meter)
        else:
            # Metrics disabled: wrappers treat None instruments as no-ops.
            (
                token_histogram,
                choice_counter,
                duration_histogram,
                exception_counter,
            ) = (None, None, None, None)

        # event_logger is inited here
        event_logger = None

        if not Config.use_legacy_attributes:
            event_logger_provider = kwargs.get("event_logger_provider")
            event_logger = get_event_logger(
                __name__, __version__, event_logger_provider=event_logger_provider
            )

        for wrapped_method in WRAPPED_METHODS:
            wrap_package = wrapped_method.get("package")
            wrap_object = wrapped_method.get("object")
            wrap_method = wrapped_method.get("method")

            try:
                wrap_function_wrapper(
                    wrap_package,
                    f"{wrap_object}.{wrap_method}",
                    _wrap(
                        tracer,
                        token_histogram,
                        choice_counter,
                        duration_histogram,
                        exception_counter,
                        event_logger,
                        wrapped_method,
                    ),
                )
                logger.debug(
                    f"Successfully wrapped {wrap_package}.{wrap_object}.{wrap_method}"
                )
            # BUGFIX: ModuleNotFoundError must be handled before the broad
            # Exception clause — with the original ordering the narrow
            # handler was unreachable dead code (ModuleNotFoundError is a
            # subclass of Exception).
            except ModuleNotFoundError:
                pass  # that's ok, we don't want to fail if some methods do not exist
            except Exception as e:
                logger.debug(
                    f"Failed to wrap {wrap_package}.{wrap_object}.{wrap_method}: {e}"
                )

        for wrapped_method in WRAPPED_AMETHODS:
            wrap_package = wrapped_method.get("package")
            wrap_object = wrapped_method.get("object")
            wrap_method = wrapped_method.get("method")
            try:
                wrap_function_wrapper(
                    wrap_package,
                    f"{wrap_object}.{wrap_method}",
                    _awrap(
                        tracer,
                        token_histogram,
                        choice_counter,
                        duration_histogram,
                        exception_counter,
                        event_logger,
                        wrapped_method,
                    ),
                )
            except Exception as e:
                # Best-effort, consistent with the sync loop above: some
                # methods may not exist in the installed SDK version, so we
                # log at debug instead of failing instrumentation.
                logger.debug(
                    f"Failed to wrap {wrap_package}.{wrap_object}.{wrap_method}: {e}"
                )

    def _uninstrument(self, **kwargs):
        """Remove the wrappers installed by :meth:`_instrument`."""
        for wrapped_method in WRAPPED_METHODS:
            wrap_package = wrapped_method.get("package")
            wrap_object = wrapped_method.get("object")
            unwrap(
                f"{wrap_package}.{wrap_object}",
                wrapped_method.get("method"),
            )
        for wrapped_method in WRAPPED_AMETHODS:
            wrap_package = wrapped_method.get("package")
            wrap_object = wrapped_method.get("object")
            unwrap(
                f"{wrap_package}.{wrap_object}",
                wrapped_method.get("method"),
            )
|