opentelemetry-instrumentation-vertexai 0.49.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- opentelemetry/instrumentation/vertexai/__init__.py +369 -0
- opentelemetry/instrumentation/vertexai/config.py +9 -0
- opentelemetry/instrumentation/vertexai/event_emitter.py +173 -0
- opentelemetry/instrumentation/vertexai/event_models.py +41 -0
- opentelemetry/instrumentation/vertexai/span_utils.py +313 -0
- opentelemetry/instrumentation/vertexai/utils.py +43 -0
- opentelemetry/instrumentation/vertexai/version.py +1 -0
- opentelemetry_instrumentation_vertexai-0.49.0.dist-info/METADATA +58 -0
- opentelemetry_instrumentation_vertexai-0.49.0.dist-info/RECORD +11 -0
- opentelemetry_instrumentation_vertexai-0.49.0.dist-info/WHEEL +4 -0
- opentelemetry_instrumentation_vertexai-0.49.0.dist-info/entry_points.txt +3 -0

opentelemetry/instrumentation/vertexai/__init__.py
@@ -0,0 +1,369 @@
```python
"""OpenTelemetry Vertex AI instrumentation"""

import logging
import types
from typing import Collection

from opentelemetry import context as context_api
from opentelemetry._logs import get_logger
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap
from opentelemetry.instrumentation.vertexai.config import Config
from opentelemetry.instrumentation.vertexai.event_emitter import (
    emit_prompt_events,
    emit_response_events,
)
from opentelemetry.instrumentation.vertexai.span_utils import (
    set_input_attributes,
    set_input_attributes_sync,
    set_model_input_attributes,
    set_model_response_attributes,
    set_response_attributes,
)
from opentelemetry.instrumentation.vertexai.utils import dont_throw, should_emit_events
from opentelemetry.instrumentation.vertexai.version import __version__
from opentelemetry.semconv._incubating.attributes import (
    gen_ai_attributes as GenAIAttributes,
)
from opentelemetry.semconv_ai import (
    SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
    LLMRequestTypeValues,
    SpanAttributes,
)
from opentelemetry.trace import SpanKind, get_tracer
from opentelemetry.trace.status import Status, StatusCode
from wrapt import wrap_function_wrapper

logger = logging.getLogger(__name__)

_instruments = ("google-cloud-aiplatform >= 1.38.1",)

WRAPPED_METHODS = [
    {
        "package": "vertexai.generative_models",
        "object": "GenerativeModel",
        "method": "generate_content",
        "span_name": "vertexai.generate_content",
        "is_async": False,
    },
    {
        "package": "vertexai.generative_models",
        "object": "GenerativeModel",
        "method": "generate_content_async",
        "span_name": "vertexai.generate_content_async",
        "is_async": True,
    },
    {
        "package": "vertexai.generative_models",
        "object": "ChatSession",
        "method": "send_message",
        "span_name": "vertexai.send_message",
        "is_async": False,
    },
    {
        "package": "vertexai.preview.generative_models",
        "object": "GenerativeModel",
        "method": "generate_content",
        "span_name": "vertexai.generate_content",
        "is_async": False,
    },
    {
        "package": "vertexai.preview.generative_models",
        "object": "GenerativeModel",
        "method": "generate_content_async",
        "span_name": "vertexai.generate_content_async",
        "is_async": True,
    },
    {
        "package": "vertexai.preview.generative_models",
        "object": "ChatSession",
        "method": "send_message",
        "span_name": "vertexai.send_message",
        "is_async": False,
    },
    {
        "package": "vertexai.language_models",
        "object": "TextGenerationModel",
        "method": "predict",
        "span_name": "vertexai.predict",
        "is_async": False,
    },
    {
        "package": "vertexai.language_models",
        "object": "TextGenerationModel",
        "method": "predict_async",
        "span_name": "vertexai.predict_async",
        "is_async": True,
    },
    {
        "package": "vertexai.language_models",
        "object": "TextGenerationModel",
        "method": "predict_streaming",
        "span_name": "vertexai.predict_streaming",
        "is_async": False,
    },
    {
        "package": "vertexai.language_models",
        "object": "TextGenerationModel",
        "method": "predict_streaming_async",
        "span_name": "vertexai.predict_streaming_async",
        "is_async": True,
    },
    {
        "package": "vertexai.language_models",
        "object": "ChatSession",
        "method": "send_message",
        "span_name": "vertexai.send_message",
        "is_async": False,
    },
    {
        "package": "vertexai.language_models",
        "object": "ChatSession",
        "method": "send_message_streaming",
        "span_name": "vertexai.send_message_streaming",
        "is_async": False,
    },
]


def is_streaming_response(response):
    return isinstance(response, types.GeneratorType)


def is_async_streaming_response(response):
    return isinstance(response, types.AsyncGeneratorType)


@dont_throw
def handle_streaming_response(span, event_logger, llm_model, response, token_usage):
    set_model_response_attributes(span, llm_model, token_usage)
    if should_emit_events():
        emit_response_events(response, event_logger)
    else:
        set_response_attributes(span, llm_model, response)
    if span.is_recording():
        span.set_status(Status(StatusCode.OK))


def _build_from_streaming_response(span, event_logger, response, llm_model):
    complete_response = ""
    token_usage = None
    for item in response:
        item_to_yield = item
        complete_response += str(item.text)
        if item.usage_metadata:
            token_usage = item.usage_metadata

        yield item_to_yield

    handle_streaming_response(
        span, event_logger, llm_model, complete_response, token_usage
    )

    span.set_status(Status(StatusCode.OK))
    span.end()


async def _abuild_from_streaming_response(span, event_logger, response, llm_model):
    complete_response = ""
    token_usage = None
    async for item in response:
        item_to_yield = item
        complete_response += str(item.text)
        if item.usage_metadata:
            token_usage = item.usage_metadata

        yield item_to_yield

    # Report the accumulated text and usage, mirroring the sync variant above;
    # passing the already-exhausted async generator here would record no content.
    handle_streaming_response(
        span, event_logger, llm_model, complete_response, token_usage
    )

    span.set_status(Status(StatusCode.OK))
    span.end()


@dont_throw
async def _handle_request(span, event_logger, args, kwargs, llm_model):
    set_model_input_attributes(span, kwargs, llm_model)
    if should_emit_events():
        emit_prompt_events(args, event_logger)
    else:
        await set_input_attributes(span, args)


def _handle_response(span, event_logger, response, llm_model):
    set_model_response_attributes(span, llm_model, response.usage_metadata)
    if should_emit_events():
        emit_response_events(response, event_logger)
    else:
        set_response_attributes(
            span, llm_model, response.candidates[0].text if response.candidates else ""
        )
    if span.is_recording():
        span.set_status(Status(StatusCode.OK))


def _with_tracer_wrapper(func):
    """Helper for providing tracer for wrapper functions."""

    def _with_tracer(tracer, event_logger, to_wrap):
        def wrapper(wrapped, instance, args, kwargs):
            return func(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs)

        return wrapper

    return _with_tracer


@_with_tracer_wrapper
async def _awrap(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs):
    """Instruments and calls every function defined in WRAPPED_METHODS."""
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
    ):
        return await wrapped(*args, **kwargs)

    llm_model = "unknown"
    if hasattr(instance, "_model_id"):
        llm_model = instance._model_id
    if hasattr(instance, "_model_name"):
        llm_model = instance._model_name.replace("publishers/google/models/", "")
    # For ChatSession, try to get the model from the parent model object
    if hasattr(instance, "_model") and hasattr(instance._model, "_model_name"):
        llm_model = instance._model._model_name.replace("publishers/google/models/", "")
    elif hasattr(instance, "_model") and hasattr(instance._model, "_model_id"):
        llm_model = instance._model._model_id

    name = to_wrap.get("span_name")
    span = tracer.start_span(
        name,
        kind=SpanKind.CLIENT,
        attributes={
            GenAIAttributes.GEN_AI_SYSTEM: "Google",
            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
        },
    )

    await _handle_request(span, event_logger, args, kwargs, llm_model)

    response = await wrapped(*args, **kwargs)

    if response:
        if is_streaming_response(response):
            return _build_from_streaming_response(
                span, event_logger, response, llm_model
            )
        elif is_async_streaming_response(response):
            return _abuild_from_streaming_response(
                span, event_logger, response, llm_model
            )
        else:
            _handle_response(span, event_logger, response, llm_model)

    span.end()
    return response


@_with_tracer_wrapper
def _wrap(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs):
    """Instruments and calls every function defined in WRAPPED_METHODS."""
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
        SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
    ):
        return wrapped(*args, **kwargs)

    llm_model = "unknown"
    if hasattr(instance, "_model_id"):
        llm_model = instance._model_id
    if hasattr(instance, "_model_name"):
        llm_model = instance._model_name.replace("publishers/google/models/", "")
    # For ChatSession, try to get the model from the parent model object
    if hasattr(instance, "_model") and hasattr(instance._model, "_model_name"):
        llm_model = instance._model._model_name.replace("publishers/google/models/", "")
    elif hasattr(instance, "_model") and hasattr(instance._model, "_model_id"):
        llm_model = instance._model._model_id

    name = to_wrap.get("span_name")
    span = tracer.start_span(
        name,
        kind=SpanKind.CLIENT,
        attributes={
            GenAIAttributes.GEN_AI_SYSTEM: "Google",
            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
        },
    )

    # Use sync version for non-async wrapper to avoid image processing for now
    set_model_input_attributes(span, kwargs, llm_model)
    if should_emit_events():
        emit_prompt_events(args, event_logger)
    else:
        set_input_attributes_sync(span, args)

    response = wrapped(*args, **kwargs)

    if response:
        if is_streaming_response(response):
            return _build_from_streaming_response(
                span, event_logger, response, llm_model
            )
        elif is_async_streaming_response(response):
            return _abuild_from_streaming_response(
                span, event_logger, response, llm_model
            )
        else:
            _handle_response(span, event_logger, response, llm_model)

    span.end()
    return response


class VertexAIInstrumentor(BaseInstrumentor):
    """An instrumentor for VertexAI's client library."""

    def __init__(self, exception_logger=None, use_legacy_attributes=True, upload_base64_image=None):
        super().__init__()
        Config.exception_logger = exception_logger
        Config.use_legacy_attributes = use_legacy_attributes
        if upload_base64_image:
            Config.upload_base64_image = upload_base64_image

    def instrumentation_dependencies(self) -> Collection[str]:
        return _instruments

    def _instrument(self, **kwargs):
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, __version__, tracer_provider)

        event_logger = None

        if should_emit_events():
            logger_provider = kwargs.get("logger_provider")
            event_logger = get_logger(
                __name__,
                __version__,
                logger_provider=logger_provider,
            )

        for wrapped_method in WRAPPED_METHODS:
            wrap_package = wrapped_method.get("package")
            wrap_object = wrapped_method.get("object")
            wrap_method = wrapped_method.get("method")

            wrap_function_wrapper(
                wrap_package,
                f"{wrap_object}.{wrap_method}",
                (
                    _awrap(tracer, event_logger, wrapped_method)
                    if wrapped_method.get("is_async")
                    else _wrap(tracer, event_logger, wrapped_method)
                ),
            )

    def _uninstrument(self, **kwargs):
        for wrapped_method in WRAPPED_METHODS:
            wrap_package = wrapped_method.get("package")
            wrap_object = wrapped_method.get("object")
            unwrap(
                f"{wrap_package}.{wrap_object}",
                wrapped_method.get("method", ""),
            )
```
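
Taken together, `_instrument` walks WRAPPED_METHODS and wraps each entry with `_wrap` or `_awrap`, so a single `instrument()` call covers the GA and preview `generative_models` APIs as well as the legacy `language_models` ones. A minimal usage sketch, not part of the package: it assumes application-default GCP credentials, and the project id and model name are placeholders.

```python
# Minimal usage sketch: print spans to stdout while calling Gemini.
# Assumes google-cloud-aiplatform is installed and GCP credentials are configured.
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor

provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

# Wraps every method in WRAPPED_METHODS; spans are named e.g. "vertexai.generate_content"
VertexAIInstrumentor().instrument(tracer_provider=provider)

import vertexai
from vertexai.generative_models import GenerativeModel

vertexai.init(project="my-gcp-project", location="us-central1")  # placeholder project
model = GenerativeModel("gemini-1.5-flash")  # placeholder model name
print(model.generate_content("Say hello").text)
```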

opentelemetry/instrumentation/vertexai/event_emitter.py
@@ -0,0 +1,173 @@
```python
from dataclasses import asdict
from enum import Enum
from typing import Union

from opentelemetry._logs import LogRecord
from opentelemetry.instrumentation.vertexai.event_models import (
    ChoiceEvent,
    MessageEvent,
)
from opentelemetry.instrumentation.vertexai.utils import (
    dont_throw,
    should_emit_events,
    should_send_prompts,
)
from opentelemetry.semconv._incubating.attributes import (
    gen_ai_attributes as GenAIAttributes,
)

from vertexai.generative_models import GenerationResponse


class Roles(Enum):
    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"
    TOOL = "tool"


VALID_MESSAGE_ROLES = {role.value for role in Roles}
"""The valid roles for naming the message event."""

EVENT_ATTRIBUTES = {
    GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.VERTEX_AI.value
}
"""The attributes to be used for the event."""


def _parse_vertex_finish_reason(reason):
    if reason is None:
        return "unknown"

    finish_reason_map = {
        0: "unspecified",
        1: "stop",
        2: "max_tokens",
        3: "safety",
        4: "recitation",
        5: "other",
        6: "blocklist",
        7: "prohibited_content",
        8: "spii",
        9: "malformed_function_call",
    }

    if hasattr(reason, "value"):
        reason_value = reason.value
    else:
        reason_value = reason

    return finish_reason_map.get(reason_value, "unknown")


@dont_throw
def emit_prompt_events(args, event_logger):
    prompt = ""
    if args is not None and len(args) > 0:
        for arg in args:
            if isinstance(arg, str):
                prompt = f"{prompt}{arg}\n"
            elif isinstance(arg, list):
                for subarg in arg:
                    prompt = f"{prompt}{subarg}\n"
    emit_event(MessageEvent(content=prompt, role=Roles.USER.value), event_logger)


def emit_response_events(response, event_logger):
    if isinstance(response, str):
        emit_event(
            ChoiceEvent(
                index=0,
                message={"content": response, "role": Roles.ASSISTANT.value},
                finish_reason="unknown",
            ),
            event_logger,
        )
    elif isinstance(response, GenerationResponse):
        for candidate in response.candidates:
            emit_event(
                ChoiceEvent(
                    index=candidate.index,
                    message={
                        "content": candidate.text,
                        "role": Roles.ASSISTANT.value,
                    },
                    finish_reason=_parse_vertex_finish_reason(candidate.finish_reason),
                ),
                event_logger,
            )


def emit_event(event: Union[MessageEvent, ChoiceEvent], event_logger) -> None:
    """
    Emit an event to the OpenTelemetry SDK.

    Args:
        event: The event to emit.
    """
    if not should_emit_events() or event_logger is None:
        return

    if isinstance(event, MessageEvent):
        _emit_message_event(event, event_logger)
    elif isinstance(event, ChoiceEvent):
        _emit_choice_event(event, event_logger)
    else:
        raise TypeError("Unsupported event type")


def _emit_message_event(event: MessageEvent, event_logger) -> None:
    body = asdict(event)

    if event.role in VALID_MESSAGE_ROLES:
        name = "gen_ai.{}.message".format(event.role)
        # According to the semantic conventions, the role is conditionally required if available
        # and not equal to the role in the message name. So, remove the role from the body if
        # it is the same as the one in the event name.
        body.pop("role", None)
    else:
        name = "gen_ai.user.message"

    # According to the semantic conventions, only the assistant role has tool calls
    if event.role != Roles.ASSISTANT.value and event.tool_calls is not None:
        del body["tool_calls"]
    elif event.tool_calls is None:
        del body["tool_calls"]

    if not should_send_prompts():
        del body["content"]
        if body.get("tool_calls") is not None:
            for tool_call in body["tool_calls"]:
                tool_call["function"].pop("arguments", None)

    log_record = LogRecord(
        body=body,
        attributes=EVENT_ATTRIBUTES,
        event_name=name,
    )
    event_logger.emit(log_record)


def _emit_choice_event(event: ChoiceEvent, event_logger) -> None:
    body = asdict(event)
    if event.message["role"] == Roles.ASSISTANT.value:
        # According to the semantic conventions, the role is conditionally required if available
        # and not equal to "assistant", so remove the role from the body if it is "assistant".
        body["message"].pop("role", None)

    if event.tool_calls is None:
        del body["tool_calls"]

    if not should_send_prompts():
        body["message"].pop("content", None)
        if body.get("tool_calls") is not None:
            for tool_call in body["tool_calls"]:
                tool_call["function"].pop("arguments", None)

    log_record = LogRecord(
        body=body,
        attributes=EVENT_ATTRIBUTES,
        event_name="gen_ai.choice",
    )
    event_logger.emit(log_record)
```
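
The emitter only runs when `should_emit_events()` is true, i.e. when the instrumentor was constructed with `use_legacy_attributes=False`. A hedged sketch, not part of the package, of wiring a `LoggerProvider` so the `gen_ai.*` events above have somewhere to go (standard OpenTelemetry SDK calls):

```python
# Illustrative sketch: route gen_ai.* events to stdout via a LoggerProvider.
from opentelemetry._logs import set_logger_provider
from opentelemetry.sdk._logs import LoggerProvider
from opentelemetry.sdk._logs.export import ConsoleLogExporter, SimpleLogRecordProcessor

from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor

log_provider = LoggerProvider()
log_provider.add_log_record_processor(SimpleLogRecordProcessor(ConsoleLogExporter()))
set_logger_provider(log_provider)

# use_legacy_attributes=False makes should_emit_events() true, so prompts and
# completions are emitted as gen_ai.user.message / gen_ai.choice log events.
VertexAIInstrumentor(use_legacy_attributes=False).instrument(logger_provider=log_provider)
```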

opentelemetry/instrumentation/vertexai/event_models.py
@@ -0,0 +1,41 @@
```python
from dataclasses import dataclass
from typing import Any, List, Literal, Optional, TypedDict


class _FunctionToolCall(TypedDict):
    function_name: str
    arguments: Optional[dict[str, Any]]


class ToolCall(TypedDict):
    """Represents a tool call in the AI model."""

    id: str
    function: _FunctionToolCall
    type: Literal["function"]


class CompletionMessage(TypedDict):
    """Represents a message in the AI model."""

    content: Any
    # Note: TypedDict members do not get runtime defaults; "assistant" here is
    # advisory for readers and type-checkers only.
    role: str = "assistant"


@dataclass
class MessageEvent:
    """Represents an input event for the AI model."""

    content: Any
    role: str = "user"
    tool_calls: Optional[List[ToolCall]] = None


@dataclass
class ChoiceEvent:
    """Represents a completion event for the AI model."""

    index: int
    message: CompletionMessage
    finish_reason: str = "unknown"
    tool_calls: Optional[List[ToolCall]] = None
```
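
Since `MessageEvent` and `ChoiceEvent` are plain dataclasses, `event_emitter` turns them into event bodies with `dataclasses.asdict`. A small illustrative sketch:

```python
# How the dataclasses above serialize into event bodies.
from dataclasses import asdict

from opentelemetry.instrumentation.vertexai.event_models import ChoiceEvent, MessageEvent

prompt = MessageEvent(content="What is OpenTelemetry?")
completion = ChoiceEvent(
    index=0,
    message={"content": "An observability framework.", "role": "assistant"},
    finish_reason="stop",
)

print(asdict(prompt))
# {'content': 'What is OpenTelemetry?', 'role': 'user', 'tool_calls': None}
print(asdict(completion))
# {'index': 0, 'message': {...}, 'finish_reason': 'stop', 'tool_calls': None}
```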

opentelemetry/instrumentation/vertexai/span_utils.py
@@ -0,0 +1,313 @@
```python
import copy
import json
import base64
import logging
import asyncio
import threading

from opentelemetry.instrumentation.vertexai.utils import dont_throw, should_send_prompts
from opentelemetry.instrumentation.vertexai.config import Config
from opentelemetry.semconv._incubating.attributes import (
    gen_ai_attributes as GenAIAttributes,
)
from opentelemetry.semconv_ai import SpanAttributes


logger = logging.getLogger(__name__)


def _set_span_attribute(span, name, value):
    if value is not None:
        if value != "":
            span.set_attribute(name, value)
    return


def _is_base64_image_part(item):
    """Check if item is a VertexAI Part object containing image data"""
    try:
        # Check if it has the Part attributes we expect
        if not hasattr(item, 'inline_data') or not hasattr(item, 'mime_type'):
            return False

        # Check if it's an image mime type and has inline data
        if item.mime_type and 'image/' in item.mime_type and item.inline_data:
            # Check if the inline_data has actual data
            if hasattr(item.inline_data, 'data') and item.inline_data.data:
                return True

        return False
    except Exception:
        return False


async def _process_image_part(item, trace_id, span_id, content_index):
    """Process a VertexAI Part object containing image data"""
    if not Config.upload_base64_image:
        return None

    try:
        # Extract format from mime type (e.g., 'image/jpeg' -> 'jpeg')
        image_format = item.mime_type.split('/')[1] if item.mime_type else 'unknown'
        image_name = f"content_{content_index}.{image_format}"

        # Convert binary data to base64 string for upload
        binary_data = item.inline_data.data
        base64_string = base64.b64encode(binary_data).decode('utf-8')

        # Upload the base64 data - convert IDs to strings
        url = await Config.upload_base64_image(str(trace_id), str(span_id), image_name, base64_string)

        # Return OpenAI-compatible format for consistency across LLM providers
        return {
            "type": "image_url",
            "image_url": {"url": url}
        }
    except Exception as e:
        logger.warning(f"Failed to process image part: {e}")
        # Return None to skip adding this image to the span
        return None


def run_async(method):
    """Handle async method in sync context, following OpenAI's battle-tested approach"""
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        loop = None

    if loop and loop.is_running():
        # Already inside a running loop: run the coroutine on a fresh loop in a
        # worker thread and block until it finishes
        thread = threading.Thread(target=lambda: asyncio.run(method))
        thread.start()
        thread.join()
    else:
        asyncio.run(method)


def _process_image_part_sync(item, trace_id, span_id, content_index):
    """Synchronous version of image part processing using OpenAI's pattern"""
    if not Config.upload_base64_image:
        return None

    try:
        # Extract format from mime type (e.g., 'image/jpeg' -> 'jpeg')
        image_format = item.mime_type.split('/')[1] if item.mime_type else 'unknown'
        image_name = f"content_{content_index}.{image_format}"

        # Convert binary data to base64 string for upload
        binary_data = item.inline_data.data
        base64_string = base64.b64encode(binary_data).decode('utf-8')

        # Use OpenAI's run_async pattern to handle the async upload function
        url = None

        async def upload_task():
            nonlocal url
            url = await Config.upload_base64_image(str(trace_id), str(span_id), image_name, base64_string)

        run_async(upload_task())

        return {
            "type": "image_url",
            "image_url": {"url": url}
        }
    except Exception as e:
        logger.warning(f"Failed to process image part sync: {e}")
        # Return None to skip adding this image to the span
        return None


async def _process_vertexai_argument(argument, span):
    """Process a single argument for VertexAI, handling different types"""
    if isinstance(argument, str):
        # Simple text argument in OpenAI format
        return [{"type": "text", "text": argument}]

    elif isinstance(argument, list):
        # List of mixed content (text strings and Part objects) - deep copy and process
        content_list = copy.deepcopy(argument)
        processed_items = []

        for item_index, content_item in enumerate(content_list):
            processed_item = await _process_content_item_vertexai(content_item, span, item_index)
            if processed_item is not None:
                processed_items.append(processed_item)

        return processed_items

    else:
        # Single Part object - convert to OpenAI format
        processed_item = await _process_content_item_vertexai(argument, span, 0)
        return [processed_item] if processed_item is not None else []


async def _process_content_item_vertexai(content_item, span, item_index):
    """Process a single content item for VertexAI"""
    if isinstance(content_item, str):
        # Convert text to OpenAI format
        return {"type": "text", "text": content_item}

    elif _is_base64_image_part(content_item):
        # Process image part
        return await _process_image_part(
            content_item, span.context.trace_id, span.context.span_id, item_index
        )

    elif hasattr(content_item, 'text'):
        # Text part to OpenAI format
        return {"type": "text", "text": content_item.text}

    else:
        # Other types as text
        return {"type": "text", "text": str(content_item)}


def _process_vertexai_argument_sync(argument, span):
    """Synchronous version of argument processing for VertexAI"""
    if isinstance(argument, str):
        # Simple text argument in OpenAI format
        return [{"type": "text", "text": argument}]

    elif isinstance(argument, list):
        # List of mixed content (text strings and Part objects) - deep copy and process
        content_list = copy.deepcopy(argument)
        processed_items = []

        for item_index, content_item in enumerate(content_list):
            processed_item = _process_content_item_vertexai_sync(content_item, span, item_index)
            if processed_item is not None:
                processed_items.append(processed_item)

        return processed_items

    else:
        # Single Part object - convert to OpenAI format
        processed_item = _process_content_item_vertexai_sync(argument, span, 0)
        return [processed_item] if processed_item is not None else []


def _process_content_item_vertexai_sync(content_item, span, item_index):
    """Synchronous version of content item processing for VertexAI"""
    if isinstance(content_item, str):
        # Convert text to OpenAI format
        return {"type": "text", "text": content_item}

    elif _is_base64_image_part(content_item):
        # Process image part
        return _process_image_part_sync(
            content_item, span.context.trace_id, span.context.span_id, item_index
        )

    elif hasattr(content_item, 'text'):
        # Text part to OpenAI format
        return {"type": "text", "text": content_item.text}

    else:
        # Other types as text
        return {"type": "text", "text": str(content_item)}


@dont_throw
async def set_input_attributes(span, args):
    """Process input arguments, handling both text and image content"""
    if not span.is_recording():
        return
    if should_send_prompts() and args is not None and len(args) > 0:
        # Process each argument using extracted helper methods
        for arg_index, argument in enumerate(args):
            processed_content = await _process_vertexai_argument(argument, span)

            if processed_content:
                _set_span_attribute(
                    span,
                    f"{GenAIAttributes.GEN_AI_PROMPT}.{arg_index}.role",
                    "user"
                )
                _set_span_attribute(
                    span,
                    f"{GenAIAttributes.GEN_AI_PROMPT}.{arg_index}.content",
                    json.dumps(processed_content)
                )


# Sync version with image processing support
@dont_throw
def set_input_attributes_sync(span, args):
    """Synchronous version with image processing support"""
    if not span.is_recording():
        return
    if should_send_prompts() and args is not None and len(args) > 0:
        # Process each argument using extracted helper methods
        for arg_index, argument in enumerate(args):
            processed_content = _process_vertexai_argument_sync(argument, span)

            if processed_content:
                _set_span_attribute(
                    span,
                    f"{GenAIAttributes.GEN_AI_PROMPT}.{arg_index}.role",
                    "user"
                )
                _set_span_attribute(
                    span,
                    f"{GenAIAttributes.GEN_AI_PROMPT}.{arg_index}.content",
                    json.dumps(processed_content)
                )


@dont_throw
def set_model_input_attributes(span, kwargs, llm_model):
    if not span.is_recording():
        return
    _set_span_attribute(span, GenAIAttributes.GEN_AI_REQUEST_MODEL, llm_model)
    _set_span_attribute(
        span, f"{GenAIAttributes.GEN_AI_PROMPT}.0.user", kwargs.get("prompt")
    )
    _set_span_attribute(
        span, GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE, kwargs.get("temperature")
    )
    _set_span_attribute(
        span, GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS, kwargs.get("max_output_tokens")
    )
    _set_span_attribute(span, GenAIAttributes.GEN_AI_REQUEST_TOP_P, kwargs.get("top_p"))
    _set_span_attribute(span, GenAIAttributes.GEN_AI_REQUEST_TOP_K, kwargs.get("top_k"))
    _set_span_attribute(
        span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")
    )
    _set_span_attribute(
        span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
    )


@dont_throw
def set_response_attributes(span, llm_model, generation_text):
    if not span.is_recording() or not should_send_prompts():
        return
    _set_span_attribute(span, f"{GenAIAttributes.GEN_AI_COMPLETION}.0.role", "assistant")
    _set_span_attribute(
        span,
        f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content",
        generation_text,
    )


@dont_throw
def set_model_response_attributes(span, llm_model, token_usage):
    if not span.is_recording():
        return
    _set_span_attribute(span, GenAIAttributes.GEN_AI_RESPONSE_MODEL, llm_model)

    if token_usage:
        _set_span_attribute(
            span,
            SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
            token_usage.total_token_count,
        )
        _set_span_attribute(
            span,
            GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS,
            token_usage.candidates_token_count,
        )
        _set_span_attribute(
            span,
            GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS,
            token_usage.prompt_token_count,
        )
```
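
The image path only activates when `Config.upload_base64_image` is set: `_is_base64_image_part` detects inline image `Part`s and the processors replace them with an OpenAI-style `image_url` entry. A hedged sketch of supplying the callback follows; the storage backend is hypothetical, and only the signature `(trace_id, span_id, image_name, base64_string) -> url` is dictated by the code above:

```python
# Hedged sketch: provide the async upload callback that _process_image_part awaits.
from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor

async def upload_base64_image(trace_id: str, span_id: str, name: str, data: str) -> str:
    # e.g. persist `data` to object storage and return a stable URL (stubbed here)
    return f"https://storage.example.com/{trace_id}/{span_id}/{name}"

VertexAIInstrumentor(upload_base64_image=upload_base64_image).instrument()
```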

opentelemetry/instrumentation/vertexai/utils.py
@@ -0,0 +1,43 @@
```python
import logging
import os
import traceback

from opentelemetry import context as context_api
from opentelemetry.instrumentation.vertexai.config import Config

TRACELOOP_TRACE_CONTENT = "TRACELOOP_TRACE_CONTENT"


def should_send_prompts():
    return (
        os.getenv(TRACELOOP_TRACE_CONTENT) or "true"
    ).lower() == "true" or context_api.get_value("override_enable_content_tracing")


def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    @param func: The function to wrap
    @return: The wrapper function
    """
    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper


def should_emit_events():
    return not Config.use_legacy_attributes
```
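
A tiny sketch of `dont_throw`'s contract: failures in instrumentation code are logged at DEBUG (and forwarded to `Config.exception_logger` when set) rather than raised into user code.

```python
# Illustrative only: exceptions inside a decorated function are swallowed.
import logging

from opentelemetry.instrumentation.vertexai.utils import dont_throw

logging.basicConfig(level=logging.DEBUG)

@dont_throw
def record_something():
    raise ValueError("instrumentation bug")

record_something()  # logs the traceback and returns None; the caller is unaffected
```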

opentelemetry/instrumentation/vertexai/version.py
@@ -0,0 +1 @@
```python
__version__ = "0.49.0"
```

opentelemetry_instrumentation_vertexai-0.49.0.dist-info/METADATA
@@ -0,0 +1,58 @@
Metadata-Version: 2.4
Name: opentelemetry-instrumentation-vertexai
Version: 0.49.0
Summary: OpenTelemetry Vertex AI instrumentation
License: Apache-2.0
Author: Gal Kleinman
Author-email: gal@traceloop.com
Requires-Python: >=3.9,<4
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Programming Language :: Python :: 3.14
Provides-Extra: instruments
Requires-Dist: opentelemetry-api (>=1.38.0,<2.0.0)
Requires-Dist: opentelemetry-instrumentation (>=0.59b0)
Requires-Dist: opentelemetry-semantic-conventions (>=0.59b0)
Requires-Dist: opentelemetry-semantic-conventions-ai (>=0.4.13,<0.5.0)
Project-URL: Repository, https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-vertexai
Description-Content-Type: text/markdown

# OpenTelemetry VertexAI Instrumentation

<a href="https://pypi.org/project/opentelemetry-instrumentation-vertexai/">
    <img src="https://badge.fury.io/py/opentelemetry-instrumentation-vertexai.svg">
</a>

This library allows tracing VertexAI prompts and completions sent with the official [VertexAI library](https://github.com/googleapis/python-aiplatform).

## Installation

```bash
pip install opentelemetry-instrumentation-vertexai
```

## Example usage

```python
from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor

VertexAIInstrumentor().instrument()
```

## Privacy

**By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.

However, you may want to disable this logging for privacy reasons, as it may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.

To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.

```bash
TRACELOOP_TRACE_CONTENT=false
```
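
Note that besides the `TRACELOOP_TRACE_CONTENT` variable documented in the Privacy section, `utils.should_send_prompts()` also checks the `override_enable_content_tracing` context key, so content capture can be re-enabled for a single request. A hedged sketch:

```python
# Re-enable prompt/completion capture for one request even when
# TRACELOOP_TRACE_CONTENT=false, via the context key should_send_prompts() checks.
from opentelemetry import context

token = context.attach(context.set_value("override_enable_content_tracing", True))
try:
    pass  # VertexAI calls made here will record content
finally:
    context.detach(token)
```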

opentelemetry_instrumentation_vertexai-0.49.0.dist-info/RECORD
@@ -0,0 +1,11 @@
opentelemetry/instrumentation/vertexai/__init__.py,sha256=u6CYdn_1a0TMlARH6oFuO9MsuV5PC83BekE-r_721mU,12363
opentelemetry/instrumentation/vertexai/config.py,sha256=LDyIH2dNsQsyFGy3otuvLWnRwf1hT_ivncioMPW8_ks,241
opentelemetry/instrumentation/vertexai/event_emitter.py,sha256=L0dp4N8butaMx48J33eu9drmRDLxF0k0FucqrTUwA9E,5168
opentelemetry/instrumentation/vertexai/event_models.py,sha256=PCfCGxrrArwZqR-4wFcXrhwQq0sBMAxmSrpC4PUMtaM,876
opentelemetry/instrumentation/vertexai/span_utils.py,sha256=y3y3JtyJhfiGdYYiV3e6s1nxw2j6jANyJgQm5ddl4z8,11062
opentelemetry/instrumentation/vertexai/utils.py,sha256=Rj-TT_GQFhfi1F1rugvDRFxl4Xo4D-rOYJojOK8iblI,1172
opentelemetry/instrumentation/vertexai/version.py,sha256=09FNUq8XbPsGZin0IRhNT7qwi97ZXQafNn7GixZiqAw,23
opentelemetry_instrumentation_vertexai-0.49.0.dist-info/METADATA,sha256=3YW9M2IUDtiHneuCM-K3UaxF_yZ6t71PA8GSG9Y0egE,2241
opentelemetry_instrumentation_vertexai-0.49.0.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
opentelemetry_instrumentation_vertexai-0.49.0.dist-info/entry_points.txt,sha256=HbacwtKx_31YuUruZKYKWOiTGnRw3YaazUKF3TPbzDc,114
opentelemetry_instrumentation_vertexai-0.49.0.dist-info/RECORD,,