opentelemetry-instrumentation-vertexai 0.19.0__tar.gz → 0.21.0__tar.gz
This diff compares publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
Potentially problematic release: this version of opentelemetry-instrumentation-vertexai might be problematic.
- {opentelemetry_instrumentation_vertexai-0.19.0 → opentelemetry_instrumentation_vertexai-0.21.0}/PKG-INFO +1 -1
- {opentelemetry_instrumentation_vertexai-0.19.0 → opentelemetry_instrumentation_vertexai-0.21.0}/opentelemetry/instrumentation/vertexai/__init__.py +44 -78
- opentelemetry_instrumentation_vertexai-0.21.0/opentelemetry/instrumentation/vertexai/version.py +1 -0
- {opentelemetry_instrumentation_vertexai-0.19.0 → opentelemetry_instrumentation_vertexai-0.21.0}/pyproject.toml +1 -1
- opentelemetry_instrumentation_vertexai-0.19.0/opentelemetry/instrumentation/vertexai/version.py +0 -1
- {opentelemetry_instrumentation_vertexai-0.19.0 → opentelemetry_instrumentation_vertexai-0.21.0}/README.md +0 -0
- {opentelemetry_instrumentation_vertexai-0.19.0 → opentelemetry_instrumentation_vertexai-0.21.0}/opentelemetry/instrumentation/vertexai/config.py +0 -0
- {opentelemetry_instrumentation_vertexai-0.19.0 → opentelemetry_instrumentation_vertexai-0.21.0}/opentelemetry/instrumentation/vertexai/utils.py +0 -0
{opentelemetry_instrumentation_vertexai-0.19.0 → opentelemetry_instrumentation_vertexai-0.21.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opentelemetry-instrumentation-vertexai
-Version: 0.19.0
+Version: 0.21.0
 Summary: OpenTelemetry Vertex AI instrumentation
 Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-vertexai
 License: Apache-2.0
{opentelemetry_instrumentation_vertexai-0.19.0 → opentelemetry_instrumentation_vertexai-0.21.0}/opentelemetry/instrumentation/vertexai/__init__.py

@@ -22,26 +22,24 @@ logger = logging.getLogger(__name__)
 
 _instruments = ("google-cloud-aiplatform >= 1.38.1",)
 
-llm_model = "unknown"
-
 WRAPPED_METHODS = [
     {
-        "package": "vertexai.
+        "package": "vertexai.generative_models",
         "object": "GenerativeModel",
-        "method": "
-        "span_name": "vertexai.
+        "method": "generate_content",
+        "span_name": "vertexai.generate_content",
     },
     {
-        "package": "vertexai.
+        "package": "vertexai.generative_models",
         "object": "GenerativeModel",
-        "method": "
+        "method": "generate_content_async",
         "span_name": "vertexai.generate_content",
     },
     {
-        "package": "vertexai.
-        "object": "
-        "method": "
-        "span_name": "vertexai.
+        "package": "vertexai.preview.generative_models",
+        "object": "GenerativeModel",
+        "method": "generate_content",
+        "span_name": "vertexai.generate_content",
     },
     {
         "package": "vertexai.language_models",
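The table above now registers both the GA vertexai.generative_models module and its preview counterpart. The loop that consumes the table sits outside this diff; the sketch below shows how such a table is conventionally applied with wrapt, with instrument and make_wrapper as hypothetical names used only for illustration.

    from importlib import import_module

    from wrapt import wrap_function_wrapper


    def instrument(wrapped_methods, make_wrapper):
        # Hypothetical helper: apply a wrapper factory over a
        # WRAPPED_METHODS-style table, skipping entries whose optional
        # package is not installed.
        for spec in wrapped_methods:
            try:
                import_module(spec["package"])
            except ImportError:
                continue
            wrap_function_wrapper(
                spec["package"],
                f"{spec['object']}.{spec['method']}",
                # e.g. functools.partial(_wrap, tracer, spec), which leaves
                # the (wrapped, instance, args, kwargs) signature wrapt expects
                make_wrapper(spec),
            )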
@@ -67,12 +65,6 @@ WRAPPED_METHODS = [
         "method": "predict_streaming_async",
         "span_name": "vertexai.predict",
     },
-    {
-        "package": "vertexai.language_models",
-        "object": "ChatModel",
-        "method": "from_pretrained",
-        "span_name": "vertexai.from_pretrained",
-    },
     {
         "package": "vertexai.language_models",
         "object": "ChatSession",
@@ -90,7 +82,7 @@ WRAPPED_METHODS = [
 
 def should_send_prompts():
     return (
-
+        os.getenv("TRACELOOP_TRACE_CONTENT") or "true"
     ).lower() == "true" or context_api.get_value("override_enable_content_tracing")
 
 
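The added line makes the default explicit: content tracing is on unless the environment variable says otherwise. A minimal, runnable sketch of that toggle's semantics, leaving out the context_api override branch that the real function also checks:

    import os


    def content_tracing_enabled():
        # Simplified copy of should_send_prompts without the context-API
        # override: any value other than "true" (case-insensitive) disables
        # prompt/completion capture.
        return (os.getenv("TRACELOOP_TRACE_CONTENT") or "true").lower() == "true"


    os.environ.pop("TRACELOOP_TRACE_CONTENT", None)
    assert content_tracing_enabled()                # default: content is traced
    os.environ["TRACELOOP_TRACE_CONTENT"] = "false"
    assert not content_tracing_enabled()            # explicit opt-out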
@@ -109,7 +101,7 @@ def _set_span_attribute(span, name, value):
         return
 
 
-def _set_input_attributes(span, args, kwargs):
+def _set_input_attributes(span, args, kwargs, llm_model):
     if should_send_prompts() and args is not None and len(args) > 0:
         prompt = ""
         for arg in args:
@@ -126,33 +118,25 @@ def _set_input_attributes(span, args, kwargs):
         )
 
     _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, llm_model)
-    _set_span_attribute(
-        span, f"{SpanAttributes.LLM_PROMPTS}.0.user", kwargs.get("prompt")
-    )
+    _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.0.user", kwargs.get("prompt"))
     _set_span_attribute(span, SpanAttributes.LLM_REQUEST_TEMPERATURE, kwargs.get("temperature"))
     _set_span_attribute(
         span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_output_tokens")
     )
     _set_span_attribute(span, SpanAttributes.LLM_REQUEST_TOP_P, kwargs.get("top_p"))
     _set_span_attribute(span, SpanAttributes.LLM_TOP_K, kwargs.get("top_k"))
-    _set_span_attribute(
-        span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")
-    )
-    _set_span_attribute(
-        span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
-    )
+    _set_span_attribute(span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty"))
+    _set_span_attribute(span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty"))
 
     return
 
 
 @dont_throw
-def _set_response_attributes(span, response):
+def _set_response_attributes(span, response, llm_model):
     _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, llm_model)
 
     if hasattr(response, "text"):
-        if hasattr(response, "_raw_response") and hasattr(
-            response._raw_response, "usage_metadata"
-        ):
+        if hasattr(response, "_raw_response") and hasattr(response._raw_response, "usage_metadata"):
             _set_span_attribute(
                 span,
                 SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
@@ -174,23 +158,19 @@ def _set_response_attributes(span, response):
             prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
             _set_span_attribute(span, f"{prefix}.content", item.text)
         elif isinstance(response.text, str):
-            _set_span_attribute(
-                span, f"{SpanAttributes.LLM_COMPLETIONS}.0.content", response.text
-            )
+            _set_span_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.0.content", response.text)
     else:
         if isinstance(response, list):
             for index, item in enumerate(response):
                 prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
                 _set_span_attribute(span, f"{prefix}.content", item)
         elif isinstance(response, str):
-            _set_span_attribute(
-                span, f"{SpanAttributes.LLM_COMPLETIONS}.0.content", response
-            )
+            _set_span_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.0.content", response)
 
     return
 
 
-def _build_from_streaming_response(span, response):
+def _build_from_streaming_response(span, response, llm_model):
     complete_response = ""
     for item in response:
         item_to_yield = item
@@ -198,13 +178,13 @@ def _build_from_streaming_response(span, response):
 
         yield item_to_yield
 
-    _set_response_attributes(span, complete_response)
+    _set_response_attributes(span, complete_response, llm_model)
 
     span.set_status(Status(StatusCode.OK))
     span.end()
 
 
-async def _abuild_from_streaming_response(span, response):
+async def _abuild_from_streaming_response(span, response, llm_model):
     complete_response = ""
     async for item in response:
         item_to_yield = item
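Both streaming builders re-yield every chunk so the caller keeps consuming the stream lazily, and only set response attributes and end the span once the iterator is exhausted. A self-contained sketch of that generator pattern (the chunk values and the callback are illustrative, not taken from the package):

    def accumulate_stream(chunks, on_complete):
        # Re-yield each chunk so the consumer still streams; run the
        # completion callback (span attributes plus span.end() in the
        # instrumentation) only after the source is exhausted.
        complete = ""
        for chunk in chunks:
            complete += chunk
            yield chunk
        on_complete(complete)


    received = list(accumulate_stream(["Hel", "lo"], print))  # prints "Hello"
    assert received == ["Hel", "lo"]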
@@ -212,22 +192,22 @@ async def _abuild_from_streaming_response(span, response):
 
         yield item_to_yield
 
-    _set_response_attributes(span, complete_response)
+    _set_response_attributes(span, complete_response, llm_model)
 
     span.set_status(Status(StatusCode.OK))
     span.end()
 
 
 @dont_throw
-def _handle_request(span, args, kwargs):
+def _handle_request(span, args, kwargs, llm_model):
     if span.is_recording():
-        _set_input_attributes(span, args, kwargs)
+        _set_input_attributes(span, args, kwargs, llm_model)
 
 
 @dont_throw
-def _handle_response(span, response):
+def _handle_response(span, response, llm_model):
     if span.is_recording():
-        _set_response_attributes(span, response)
+        _set_response_attributes(span, response, llm_model)
 
     span.set_status(Status(StatusCode.OK))
 
@@ -250,18 +230,11 @@ async def _awrap(tracer, to_wrap, wrapped, instance, args, kwargs):
     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
         return await wrapped(*args, **kwargs)
 
-    global llm_model
-
-    if (
-        (
-            to_wrap.get("method") == "from_pretrained"
-            or to_wrap.get("method") == "__init__"
-        )
-        and args is not None
-        and len(args) > 0
-    ):
-        llm_model = args[0]
-        return await wrapped(*args, **kwargs)
+    llm_model = "unknown"
+    if hasattr(instance, "_model_id"):
+        llm_model = instance._model_id
+    if hasattr(instance, "_model_name"):
+        llm_model = instance._model_name.replace("publishers/google/models/", "")
 
     name = to_wrap.get("span_name")
     span = tracer.start_span(
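This hunk is the heart of the release: the old code wrapped from_pretrained and __init__ to stash the model name in a module-level global, which was shared across all instances, while the new code derives it per call from the wrapped instance. A runnable sketch of the new resolution order (FakeGenerativeModel and the gemini-pro resource name are stand-ins, not the real vertexai classes):

    class FakeGenerativeModel:
        # Stub standing in for a vertexai model object; real instances carry
        # private attributes like _model_name ("publishers/google/models/...").
        def __init__(self, model_name):
            self._model_name = model_name


    def resolve_llm_model(instance):
        # Same precedence as the new wrapper code: _model_name, when present,
        # overrides _model_id, and the publisher prefix is stripped so the
        # span attribute reads as a bare model name.
        llm_model = "unknown"
        if hasattr(instance, "_model_id"):
            llm_model = instance._model_id
        if hasattr(instance, "_model_name"):
            llm_model = instance._model_name.replace("publishers/google/models/", "")
        return llm_model


    assert resolve_llm_model(FakeGenerativeModel("publishers/google/models/gemini-pro")) == "gemini-pro"
    assert resolve_llm_model(object()) == "unknown"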
@@ -273,17 +246,17 @@ async def _awrap(tracer, to_wrap, wrapped, instance, args, kwargs):
         },
     )
 
-    _handle_request(span, args, kwargs)
+    _handle_request(span, args, kwargs, llm_model)
 
     response = await wrapped(*args, **kwargs)
 
    if response:
         if is_streaming_response(response):
-            return _build_from_streaming_response(span, response)
+            return _build_from_streaming_response(span, response, llm_model)
         elif is_async_streaming_response(response):
-            return _abuild_from_streaming_response(span, response)
+            return _abuild_from_streaming_response(span, response, llm_model)
         else:
-            _handle_response(span, response)
+            _handle_response(span, response, llm_model)
 
     span.end()
     return response
@@ -295,18 +268,11 @@ def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
         return wrapped(*args, **kwargs)
 
-    global llm_model
-
-    if (
-        (
-            to_wrap.get("method") == "from_pretrained"
-            or to_wrap.get("method") == "__init__"
-        )
-        and args is not None
-        and len(args) > 0
-    ):
-        llm_model = args[0]
-        return wrapped(*args, **kwargs)
+    llm_model = "unknown"
+    if hasattr(instance, "_model_id"):
+        llm_model = instance._model_id
+    if hasattr(instance, "_model_name"):
+        llm_model = instance._model_name.replace("publishers/google/models/", "")
 
     name = to_wrap.get("span_name")
     span = tracer.start_span(
@@ -318,17 +284,17 @@ def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
         },
     )
 
-    _handle_request(span, args, kwargs)
+    _handle_request(span, args, kwargs, llm_model)
 
     response = wrapped(*args, **kwargs)
 
     if response:
         if is_streaming_response(response):
-            return _build_from_streaming_response(span, response)
+            return _build_from_streaming_response(span, response, llm_model)
         elif is_async_streaming_response(response):
-            return _abuild_from_streaming_response(span, response)
+            return _abuild_from_streaming_response(span, response, llm_model)
         else:
-            _handle_response(span, response)
+            _handle_response(span, response, llm_model)
 
     span.end()
     return response
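None of this changes how the package is switched on; assuming the usual instrumentor entry point (the VertexAIInstrumentor class name comes from the project README, not from this diff), enabling it looks like:

    # Assumed usage sketch: standard OpenTelemetry BaseInstrumentor pattern.
    from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor

    VertexAIInstrumentor().instrument()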
opentelemetry_instrumentation_vertexai-0.21.0/opentelemetry/instrumentation/vertexai/version.py ADDED

@@ -0,0 +1 @@
+__version__ = "0.21.0"
opentelemetry_instrumentation_vertexai-0.19.0/opentelemetry/instrumentation/vertexai/version.py DELETED

@@ -1 +0,0 @@
-__version__ = "0.19.0"