opentelemetry-instrumentation-vertexai 0.47.3-py3-none-any.whl → 2.1b0-py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Potentially problematic release.
This version of opentelemetry-instrumentation-vertexai might be problematic.
- opentelemetry/instrumentation/vertexai/__init__.py +150 -343
- opentelemetry/instrumentation/vertexai/events.py +190 -0
- opentelemetry/instrumentation/vertexai/package.py +16 -0
- opentelemetry/instrumentation/vertexai/patch.py +371 -0
- opentelemetry/instrumentation/vertexai/py.typed +0 -0
- opentelemetry/instrumentation/vertexai/utils.py +445 -29
- opentelemetry/instrumentation/vertexai/version.py +15 -1
- opentelemetry_instrumentation_vertexai-2.1b0.dist-info/METADATA +106 -0
- opentelemetry_instrumentation_vertexai-2.1b0.dist-info/RECORD +12 -0
- {opentelemetry_instrumentation_vertexai-0.47.3.dist-info → opentelemetry_instrumentation_vertexai-2.1b0.dist-info}/WHEEL +1 -1
- opentelemetry_instrumentation_vertexai-2.1b0.dist-info/entry_points.txt +2 -0
- opentelemetry_instrumentation_vertexai-2.1b0.dist-info/licenses/LICENSE +201 -0
- opentelemetry/instrumentation/vertexai/config.py +0 -9
- opentelemetry/instrumentation/vertexai/event_emitter.py +0 -164
- opentelemetry/instrumentation/vertexai/event_models.py +0 -41
- opentelemetry/instrumentation/vertexai/span_utils.py +0 -310
- opentelemetry_instrumentation_vertexai-0.47.3.dist-info/METADATA +0 -58
- opentelemetry_instrumentation_vertexai-0.47.3.dist-info/RECORD +0 -11
- opentelemetry_instrumentation_vertexai-0.47.3.dist-info/entry_points.txt +0 -3
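The file list alone signals a rewrite rather than an incremental bump: the 0.47.3 helper modules (config.py, event_emitter.py, event_models.py, span_utils.py) disappear, and 2.1b0 ships patch.py, events.py, package.py, and utils.py instead. The diff below shows the matching change in __init__.py: instead of patching high-level vertexai SDK classes from a static WRAPPED_METHODS table, the new instrumentor wraps generate_content on the low-level v1/v1beta1 PredictionService clients and records each (client_class, method_name) pair so _uninstrument can undo exactly what was patched. A minimal sketch of that wrap-and-remember pattern, using a hypothetical Target class rather than the real clients:

    from typing import Any

    from opentelemetry.instrumentation.utils import unwrap
    from wrapt import wrap_function_wrapper


    class Target:
        """Hypothetical stand-in for a client class such as PredictionServiceClient."""

        def generate_content(self, prompt: str) -> str:
            return f"echo: {prompt}"


    def tracing_wrapper(wrapped, instance, args, kwargs):
        # A real wrapper would open a span around the call; this sketch just delegates.
        print(f"intercepted {wrapped.__name__}")
        return wrapped(*args, **kwargs)


    patched: list[tuple[Any, str]] = []

    # Instrument: patch the method in place and remember what was patched.
    wrap_function_wrapper(Target, "generate_content", tracing_wrapper)
    patched.append((Target, "generate_content"))

    print(Target().generate_content("hi"))  # "intercepted generate_content", then "echo: hi"

    # Uninstrument: unwrap() restores the original attribute from __wrapped__.
    for cls, method_name in patched:
        unwrap(cls, method_name)

Keeping the class/method pairs around also avoids re-deriving dotted module paths at uninstrument time, which is what the 0.47.3 code below does with f-strings.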
opentelemetry/instrumentation/vertexai/__init__.py

@@ -1,366 +1,173 @@
-# (old lines 1-35 not rendered in the source diff)
-_instruments = ("google-cloud-aiplatform >= 1.38.1",)
-
-WRAPPED_METHODS = [
-    {
-        "package": "vertexai.generative_models",
-        "object": "GenerativeModel",
-        "method": "generate_content",
-        "span_name": "vertexai.generate_content",
-        "is_async": False,
-    },
-    {
-        "package": "vertexai.generative_models",
-        "object": "GenerativeModel",
-        "method": "generate_content_async",
-        "span_name": "vertexai.generate_content_async",
-        "is_async": True,
-    },
-    {
-        "package": "vertexai.generative_models",
-        "object": "ChatSession",
-        "method": "send_message",
-        "span_name": "vertexai.send_message",
-        "is_async": False,
-    },
-    {
-        "package": "vertexai.preview.generative_models",
-        "object": "GenerativeModel",
-        "method": "generate_content",
-        "span_name": "vertexai.generate_content",
-        "is_async": False,
-    },
-    {
-        "package": "vertexai.preview.generative_models",
-        "object": "GenerativeModel",
-        "method": "generate_content_async",
-        "span_name": "vertexai.generate_content_async",
-        "is_async": True,
-    },
-    {
-        "package": "vertexai.preview.generative_models",
-        "object": "ChatSession",
-        "method": "send_message",
-        "span_name": "vertexai.send_message",
-        "is_async": False,
-    },
-    {
-        "package": "vertexai.language_models",
-        "object": "TextGenerationModel",
-        "method": "predict",
-        "span_name": "vertexai.predict",
-        "is_async": False,
-    },
-    {
-        "package": "vertexai.language_models",
-        "object": "TextGenerationModel",
-        "method": "predict_async",
-        "span_name": "vertexai.predict_async",
-        "is_async": True,
-    },
-    {
-        "package": "vertexai.language_models",
-        "object": "TextGenerationModel",
-        "method": "predict_streaming",
-        "span_name": "vertexai.predict_streaming",
-        "is_async": False,
-    },
-    {
-        "package": "vertexai.language_models",
-        "object": "TextGenerationModel",
-        "method": "predict_streaming_async",
-        "span_name": "vertexai.predict_streaming_async",
-        "is_async": True,
-    },
-    {
-        "package": "vertexai.language_models",
-        "object": "ChatSession",
-        "method": "send_message",
-        "span_name": "vertexai.send_message",
-        "is_async": False,
-    },
-    {
-        "package": "vertexai.language_models",
-        "object": "ChatSession",
-        "method": "send_message_streaming",
-        "span_name": "vertexai.send_message_streaming",
-        "is_async": False,
-    },
-]
-
-
-def is_streaming_response(response):
-    return isinstance(response, types.GeneratorType)
-
-
-def is_async_streaming_response(response):
-    return isinstance(response, types.AsyncGeneratorType)
-
-
-@dont_throw
-def handle_streaming_response(span, event_logger, llm_model, response, token_usage):
-    set_model_response_attributes(span, llm_model, token_usage)
-    if should_emit_events():
-        emit_response_events(response, event_logger)
-    else:
-        set_response_attributes(span, llm_model, response)
-    if span.is_recording():
-        span.set_status(Status(StatusCode.OK))
-
-
-def _build_from_streaming_response(span, event_logger, response, llm_model):
-    complete_response = ""
-    token_usage = None
-    for item in response:
-        item_to_yield = item
-        complete_response += str(item.text)
-        if item.usage_metadata:
-            token_usage = item.usage_metadata
-
-        yield item_to_yield
-
-    handle_streaming_response(
-        span, event_logger, llm_model, complete_response, token_usage
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+VertexAI client instrumentation supporting `google-cloud-aiplatform` SDK, it can be enabled by
+using ``VertexAIInstrumentor``.
+
+.. _vertexai: https://pypi.org/project/google-cloud-aiplatform/
+
+Usage
+-----
+
+.. code:: python
+
+    import vertexai
+    from vertexai.generative_models import GenerativeModel
+    from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor
+
+    VertexAIInstrumentor().instrument()
+
+    vertexai.init()
+    model = GenerativeModel("gemini-1.5-flash-002")
+    chat_completion = model.generate_content(
+        "Write a short poem on OpenTelemetry."
     )

-
-
+API
+---
+"""

+from __future__ import annotations

-
-    complete_response = ""
-    token_usage = None
-    async for item in response:
-        item_to_yield = item
-        complete_response += str(item.text)
-        if item.usage_metadata:
-            token_usage = item.usage_metadata
+from typing import Any, Collection

-
-
-
-
-    span.set_status(Status(StatusCode.OK))
-    span.end()
-
-
-@dont_throw
-async def _handle_request(span, event_logger, args, kwargs, llm_model):
-    set_model_input_attributes(span, kwargs, llm_model)
-    if should_emit_events():
-        emit_prompt_events(args, event_logger)
-    else:
-        await set_input_attributes(span, args)
-
-
-def _handle_response(span, event_logger, response, llm_model):
-    set_model_response_attributes(span, llm_model, response.usage_metadata)
-    if should_emit_events():
-        emit_response_events(response, event_logger)
-    else:
-        set_response_attributes(
-            span, llm_model, response.candidates[0].text if response.candidates else ""
-        )
-    if span.is_recording():
-        span.set_status(Status(StatusCode.OK))
-
-
-def _with_tracer_wrapper(func):
-    """Helper for providing tracer for wrapper functions."""
-
-    def _with_tracer(tracer, event_logger, to_wrap):
-        def wrapper(wrapped, instance, args, kwargs):
-            return func(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs)
-
-        return wrapper
-
-    return _with_tracer
-
-
-@_with_tracer_wrapper
-async def _awrap(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs):
-    """Instruments and calls every function defined in TO_WRAP."""
-    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
-        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
-    ):
-        return await wrapped(*args, **kwargs)
-
-    llm_model = "unknown"
-    if hasattr(instance, "_model_id"):
-        llm_model = instance._model_id
-    if hasattr(instance, "_model_name"):
-        llm_model = instance._model_name.replace("publishers/google/models/", "")
-    # For ChatSession, try to get model from the parent model object
-    if hasattr(instance, "_model") and hasattr(instance._model, "_model_name"):
-        llm_model = instance._model._model_name.replace("publishers/google/models/", "")
-    elif hasattr(instance, "_model") and hasattr(instance._model, "_model_id"):
-        llm_model = instance._model._model_id
+from wrapt import (
+    wrap_function_wrapper,  # type: ignore[reportUnknownVariableType]
+)

-
-
-
-
-
-
-
-
+from opentelemetry._logs import get_logger
+from opentelemetry.instrumentation._semconv import (
+    _OpenTelemetrySemanticConventionStability,
+    _OpenTelemetryStabilitySignalType,
+    _StabilityMode,
+)
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from opentelemetry.instrumentation.utils import unwrap
+from opentelemetry.instrumentation.vertexai.package import _instruments
+from opentelemetry.instrumentation.vertexai.patch import MethodWrappers
+from opentelemetry.instrumentation.vertexai.utils import is_content_enabled
+from opentelemetry.semconv.schemas import Schemas
+from opentelemetry.trace import get_tracer
+from opentelemetry.util.genai.completion_hook import load_completion_hook
+
+
+def _methods_to_wrap(
+    method_wrappers: MethodWrappers,
+):
+    # This import is very slow, do it lazily in case instrument() is not called
+    # pylint: disable=import-outside-toplevel
+    from google.cloud.aiplatform_v1.services.prediction_service import (
+        async_client,
+        client,
     )

-
-
-
-
-
-    if response:
-        if is_streaming_response(response):
-            return _build_from_streaming_response(
-                span, event_logger, response, llm_model
-            )
-        elif is_async_streaming_response(response):
-            return _abuild_from_streaming_response(
-                span, event_logger, response, llm_model
-            )
-        else:
-            _handle_response(span, event_logger, response, llm_model)
-
-    span.end()
-    return response
-
-
-@_with_tracer_wrapper
-def _wrap(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs):
-    """Instruments and calls every function defined in TO_WRAP."""
-    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
-        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
-    ):
-        return wrapped(*args, **kwargs)
-
-    llm_model = "unknown"
-    if hasattr(instance, "_model_id"):
-        llm_model = instance._model_id
-    if hasattr(instance, "_model_name"):
-        llm_model = instance._model_name.replace("publishers/google/models/", "")
-    # For ChatSession, try to get model from the parent model object
-    if hasattr(instance, "_model") and hasattr(instance._model, "_model_name"):
-        llm_model = instance._model._model_name.replace("publishers/google/models/", "")
-    elif hasattr(instance, "_model") and hasattr(instance._model, "_model_id"):
-        llm_model = instance._model._model_id
-
-    name = to_wrap.get("span_name")
-    span = tracer.start_span(
-        name,
-        kind=SpanKind.CLIENT,
-        attributes={
-            SpanAttributes.LLM_SYSTEM: "Google",
-            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
-        },
+    from google.cloud.aiplatform_v1beta1.services.prediction_service import (
+        async_client as async_client_v1beta1,
+    )
+    from google.cloud.aiplatform_v1beta1.services.prediction_service import (
+        client as client_v1beta1,
     )

-
-
-
-
-
-
-
-
-
-    if response:
-        if is_streaming_response(response):
-            return _build_from_streaming_response(
-                span, event_logger, response, llm_model
-            )
-        elif is_async_streaming_response(response):
-            return _abuild_from_streaming_response(
-                span, event_logger, response, llm_model
-            )
-        else:
-            _handle_response(span, event_logger, response, llm_model)
+    for client_class in (
+        client.PredictionServiceClient,
+        client_v1beta1.PredictionServiceClient,
+    ):
+        yield (
+            client_class,
+            client_class.generate_content.__name__,  # type: ignore[reportUnknownMemberType]
+            method_wrappers.generate_content,
+        )

-
-
+    for client_class in (
+        async_client.PredictionServiceAsyncClient,
+        async_client_v1beta1.PredictionServiceAsyncClient,
+    ):
+        yield (
+            client_class,
+            client_class.generate_content.__name__,  # type: ignore[reportUnknownMemberType]
+            method_wrappers.agenerate_content,
+        )


 class VertexAIInstrumentor(BaseInstrumentor):
-
-
-    def __init__(self, exception_logger=None, use_legacy_attributes=True, upload_base64_image=None):
+    def __init__(self) -> None:
         super().__init__()
-
-        Config.use_legacy_attributes = use_legacy_attributes
-        if upload_base64_image:
-            Config.upload_base64_image = upload_base64_image
+        self._methods_to_unwrap: list[tuple[Any, str]] = []

     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments

-    def _instrument(self, **kwargs):
+    def _instrument(self, **kwargs: Any):
+        """Enable VertexAI instrumentation."""
+        completion_hook = (
+            kwargs.get("completion_hook") or load_completion_hook()
+        )
+        sem_conv_opt_in_mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode(
+            _OpenTelemetryStabilitySignalType.GEN_AI,
+        )
         tracer_provider = kwargs.get("tracer_provider")
-
-
-
-
-
-
-
-
-
-
+        schema = (
+            Schemas.V1_28_0.value
+            if sem_conv_opt_in_mode == _StabilityMode.DEFAULT
+            else Schemas.V1_36_0.value
+        )
+        tracer = get_tracer(
+            __name__,
+            "",
+            tracer_provider,
+            schema_url=schema,
+        )
+        logger_provider = kwargs.get("logger_provider")
+        logger = get_logger(
+            __name__,
+            "",
+            logger_provider=logger_provider,
+            schema_url=schema,
+        )
+        sem_conv_opt_in_mode = _OpenTelemetrySemanticConventionStability._get_opentelemetry_stability_opt_in_mode(
+            _OpenTelemetryStabilitySignalType.GEN_AI,
+        )
+        if sem_conv_opt_in_mode == _StabilityMode.DEFAULT:
+            # Type checker now knows sem_conv_opt_in_mode is a Literal[_StabilityMode.DEFAULT]
+            method_wrappers = MethodWrappers(
+                tracer,
+                logger,
+                is_content_enabled(sem_conv_opt_in_mode),
+                sem_conv_opt_in_mode,
+                completion_hook,
             )
-
-
-
-
-
-
+        elif sem_conv_opt_in_mode == _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL:
+            # Type checker now knows it's the other literal
+            method_wrappers = MethodWrappers(
+                tracer,
+                logger,
+                is_content_enabled(sem_conv_opt_in_mode),
+                sem_conv_opt_in_mode,
+                completion_hook,
+            )
+        else:
+            raise RuntimeError(f"{sem_conv_opt_in_mode} mode not supported")
+        for client_class, method_name, wrapper in _methods_to_wrap(
+            method_wrappers
+        ):
             wrap_function_wrapper(
-
-
-
-                _awrap(tracer, event_logger, wrapped_method)
-                if wrapped_method.get("is_async")
-                else _wrap(tracer, event_logger, wrapped_method)
-            ),
+                client_class,
+                name=method_name,
+                wrapper=wrapper,
             )
+            self._methods_to_unwrap.append((client_class, method_name))

-    def _uninstrument(self, **kwargs):
-        for wrapped_method in WRAPPED_METHODS:
-            wrap_package = wrapped_method.get("package")
-            wrap_object = wrapped_method.get("object")
-            unwrap(
-                f"{wrap_package}.{wrap_object}",
-                wrapped_method.get("method", ""),
-            )
+    def _uninstrument(self, **kwargs: Any) -> None:
+        for client_class, method_name in self._methods_to_unwrap:
+            unwrap(client_class, method_name)
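A usage note on the rewritten _instrument above: which semantic conventions and schema URL you get is decided by the GEN_AI stability opt-in mode, which the imported _semconv machinery reads from the OTEL_SEMCONV_STABILITY_OPT_IN environment variable. An end-to-end sketch, assuming default providers and that the opt-in token matches the value string of the _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL member referenced in the diff:

    import os

    # Assumption: the opt-in token mirrors _StabilityMode.GEN_AI_LATEST_EXPERIMENTAL's
    # value string; set it before instrumenting so the experimental mode is picked up.
    os.environ["OTEL_SEMCONV_STABILITY_OPT_IN"] = "gen_ai_latest_experimental"

    import vertexai
    from vertexai.generative_models import GenerativeModel

    from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor

    instrumentor = VertexAIInstrumentor()
    instrumentor.instrument()  # wraps the v1 and v1beta1 PredictionService clients

    vertexai.init()
    model = GenerativeModel("gemini-1.5-flash-002")
    print(model.generate_content("Write a short poem on OpenTelemetry.").text)

    # Restores the original client methods recorded in _methods_to_unwrap.
    instrumentor.uninstrument()

Under the default mode (no opt-in), the same code still works; the instrumentor simply selects the older schema (Schemas.V1_28_0) and the corresponding MethodWrappers configuration.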