opentelemetry-instrumentation-vertexai 0.33.6__tar.gz → 0.33.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of opentelemetry-instrumentation-vertexai might be problematic.
Files changed:

- PKG-INFO: +1 -1
- opentelemetry/instrumentation/vertexai/__init__.py: +45 -43
- opentelemetry/instrumentation/vertexai/version.py: +1 -0 (added in 0.33.8)
- pyproject.toml: +1 -1
- opentelemetry/instrumentation/vertexai/version.py: +0 -1 (removed from 0.33.6)
- README.md: +0 -0 (unchanged)
- opentelemetry/instrumentation/vertexai/config.py: +0 -0 (unchanged)
- opentelemetry/instrumentation/vertexai/utils.py: +0 -0 (unchanged)
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opentelemetry-instrumentation-vertexai
-Version: 0.33.6
+Version: 0.33.8
 Summary: OpenTelemetry Vertex AI instrumentation
 Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-vertexai
 License: Apache-2.0
opentelemetry/instrumentation/vertexai/__init__.py

@@ -32,60 +32,70 @@ WRAPPED_METHODS = [
         "object": "GenerativeModel",
         "method": "generate_content",
         "span_name": "vertexai.generate_content",
+        "is_async": False,
     },
     {
         "package": "vertexai.generative_models",
         "object": "GenerativeModel",
         "method": "generate_content_async",
         "span_name": "vertexai.generate_content_async",
+        "is_async": True,
     },
     {
         "package": "vertexai.preview.generative_models",
         "object": "GenerativeModel",
         "method": "generate_content",
         "span_name": "vertexai.generate_content",
+        "is_async": False,
     },
     {
         "package": "vertexai.preview.generative_models",
         "object": "GenerativeModel",
         "method": "generate_content_async",
         "span_name": "vertexai.generate_content_async",
+        "is_async": True,
     },
     {
         "package": "vertexai.language_models",
         "object": "TextGenerationModel",
         "method": "predict",
         "span_name": "vertexai.predict",
+        "is_async": False,
     },
     {
         "package": "vertexai.language_models",
         "object": "TextGenerationModel",
         "method": "predict_async",
         "span_name": "vertexai.predict_async",
+        "is_async": True,
     },
     {
         "package": "vertexai.language_models",
         "object": "TextGenerationModel",
         "method": "predict_streaming",
         "span_name": "vertexai.predict_streaming",
+        "is_async": False,
     },
     {
         "package": "vertexai.language_models",
         "object": "TextGenerationModel",
         "method": "predict_streaming_async",
         "span_name": "vertexai.predict_streaming_async",
+        "is_async": True,
     },
     {
         "package": "vertexai.language_models",
         "object": "ChatSession",
         "method": "send_message",
         "span_name": "vertexai.send_message",
+        "is_async": False,
     },
     {
         "package": "vertexai.language_models",
         "object": "ChatSession",
         "method": "send_message_streaming",
         "span_name": "vertexai.send_message_streaming",
+        "is_async": False,
     },
 ]
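Each WRAPPED_METHODS entry names the module, class, and method to patch plus the span name to emit, and the new "is_async" flag makes explicit which targets are coroutines instead of leaving that to be inferred. A minimal sketch of how one such entry can drive wrapt-based patching; the traced wrapper here is a hypothetical stand-in for the package's real _wrap/_awrap helpers, and running it assumes the vertexai SDK is importable:

    from wrapt import wrap_function_wrapper

    # One entry from the table above.
    entry = {
        "package": "vertexai.generative_models",
        "object": "GenerativeModel",
        "method": "generate_content",
        "span_name": "vertexai.generate_content",
        "is_async": False,
    }

    def traced(wrapped, instance, args, kwargs):
        # Hypothetical pass-through wrapper; the real helpers open a span,
        # record request and response attributes, then delegate like this.
        return wrapped(*args, **kwargs)

    # Patches vertexai.generative_models.GenerativeModel.generate_content
    # in place, for existing and future instances alike.
    wrap_function_wrapper(entry["package"], f"{entry['object']}.{entry['method']}", traced)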
@@ -150,59 +160,46 @@ def _set_input_attributes(span, args, kwargs, llm_model):
 
 
 @dont_throw
-def _set_response_attributes(span, response, llm_model):
+def _set_response_attributes(span, llm_model, generation_text, token_usage):
     _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, llm_model)
 
-    if (
-        response._raw_response
-        and response._raw_response.usage_metadata
-    ):
-        _set_span_attribute(
-            span,
-            SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
-            response._raw_response.usage_metadata.total_token_count,
-        )
-        _set_span_attribute(
-            span,
-            SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
-            response._raw_response.usage_metadata.candidates_token_count,
-        )
-        _set_span_attribute(
-            span,
-            SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
-            response._raw_response.usage_metadata.prompt_token_count,
-        )
-
-    if isinstance(response.text, list):
-        for index, item in enumerate(response):
-            prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
-            _set_span_attribute(span, f"{prefix}.content", item.text)
-    elif isinstance(response.text, str):
-        _set_span_attribute(
-            span, f"{SpanAttributes.LLM_COMPLETIONS}.0.content", response.text
-        )
-    else:
-        if isinstance(response, list):
-            for index, item in enumerate(response):
-                prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
-                _set_span_attribute(span, f"{prefix}.content", item)
-        elif isinstance(response, str):
-            _set_span_attribute(
-                span, f"{SpanAttributes.LLM_COMPLETIONS}.0.content", response
-            )
+    if token_usage:
+        _set_span_attribute(
+            span,
+            SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
+            token_usage.total_token_count,
+        )
+        _set_span_attribute(
+            span,
+            SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
+            token_usage.candidates_token_count,
+        )
+        _set_span_attribute(
+            span,
+            SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
+            token_usage.prompt_token_count,
+        )
 
-
+    _set_span_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.0.role", "assistant")
+    _set_span_attribute(
+        span,
+        f"{SpanAttributes.LLM_COMPLETIONS}.0.content",
+        generation_text,
+    )
 
 
 def _build_from_streaming_response(span, response, llm_model):
     complete_response = ""
+    token_usage = None
     for item in response:
         item_to_yield = item
         complete_response += str(item.text)
+        if item.usage_metadata:
+            token_usage = item.usage_metadata
 
         yield item_to_yield
 
-    _set_response_attributes(span, complete_response, llm_model)
+    _set_response_attributes(span, llm_model, complete_response, token_usage)
 
     span.set_status(Status(StatusCode.OK))
     span.end()
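This is the substantive fix in the release: previously the streaming path passed only the accumulated text to _set_response_attributes, so streamed generations lost their token counts. The generator now also watches each chunk for usage_metadata and hands the last one seen to _set_response_attributes once the stream is exhausted, since neither the full completion nor the usage exists before then. A self-contained sketch of that pattern, with stand-in chunk and usage types instead of the real Vertex AI objects and illustrative attribute names:

    from dataclasses import dataclass
    from typing import Iterator, Optional

    from opentelemetry import trace

    @dataclass
    class TokenUsage:  # stand-in for Vertex AI's usage_metadata object
        prompt_token_count: int
        candidates_token_count: int
        total_token_count: int

    @dataclass
    class Chunk:  # stand-in for one streamed response chunk
        text: str
        usage_metadata: Optional[TokenUsage] = None

    def accumulate_stream(span, stream: Iterator[Chunk]) -> Iterator[Chunk]:
        # Pass every chunk through untouched while collecting text and usage.
        complete_response = ""
        token_usage = None
        for chunk in stream:
            complete_response += chunk.text
            if chunk.usage_metadata:  # typically only the final chunk carries usage
                token_usage = chunk.usage_metadata
            yield chunk
        # Only after exhaustion is the full generation known; record and close.
        span.set_attribute("llm.completions.0.content", complete_response)
        if token_usage is not None:
            span.set_attribute("llm.usage.total_tokens", token_usage.total_token_count)
        span.end()

    # Drain a fake two-chunk stream; the attributes land when it ends.
    span = trace.get_tracer(__name__).start_span("demo")
    for chunk in accumulate_stream(span, [Chunk("Hel"), Chunk("lo", TokenUsage(5, 2, 7))]):
        pass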
@@ -210,13 +207,16 @@ def _build_from_streaming_response(span, response, llm_model):
 
 
 async def _abuild_from_streaming_response(span, response, llm_model):
     complete_response = ""
+    token_usage = None
     async for item in response:
         item_to_yield = item
         complete_response += str(item.text)
+        if item.usage_metadata:
+            token_usage = item.usage_metadata
 
         yield item_to_yield
 
-    _set_response_attributes(span, complete_response, llm_model)
+    _set_response_attributes(span, llm_model, complete_response, token_usage)
 
     span.set_status(Status(StatusCode.OK))
     span.end()
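The async generator gets the identical treatment. A sketch of the equivalent pattern, reusing the stand-in chunk objects from the previous example:

    async def accumulate_stream_async(span, stream):
        # Same accumulation as above, for SDK methods that return async iterators.
        complete_response = ""
        token_usage = None
        async for chunk in stream:
            complete_response += chunk.text
            if chunk.usage_metadata:
                token_usage = chunk.usage_metadata
            yield chunk  # callers keep consuming with `async for`
        span.set_attribute("llm.completions.0.content", complete_response)
        if token_usage is not None:
            span.set_attribute("llm.usage.total_tokens", token_usage.total_token_count)
        span.end()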
@@ -231,7 +231,9 @@ def _handle_request(span, args, kwargs, llm_model):
 @dont_throw
 def _handle_response(span, response, llm_model):
     if span.is_recording():
-        _set_response_attributes(span, response, llm_model)
+        _set_response_attributes(
+            span, llm_model, response.candidates[0].text, response.usage_metadata
+        )
 
     span.set_status(Status(StatusCode.OK))
 
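For non-streaming calls the complete response object is available immediately, so the handler reads the first candidate's text and the response-level usage_metadata directly. The shape it relies on, mocked here with fake classes (field names follow the Vertex AI SDK):

    # Fake classes mirroring just the fields the new call reads.
    class FakeUsage:
        prompt_token_count = 9
        candidates_token_count = 12
        total_token_count = 21

    class FakeCandidate:
        text = "It is sunny in Tokyo today."

    class FakeResponse:
        candidates = [FakeCandidate()]
        usage_metadata = FakeUsage()

    response = FakeResponse()
    generation_text = response.candidates[0].text  # only the first candidate is recorded
    token_usage = response.usage_metadata          # aggregate token counts for the call

Note that only candidates[0] is captured, so a request that asks for more than one candidate would have its extra completions dropped from the span.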
@@ -351,7 +353,7 @@ class VertexAIInstrumentor(BaseInstrumentor):
             f"{wrap_object}.{wrap_method}",
             (
                 _awrap(tracer, wrapped_method)
-                if …
+                if wrapped_method.get("is_async")
                 else _wrap(tracer, wrapped_method)
             ),
         )
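The dispatch is now driven by the explicit flag rather than whatever the old condition inspected: an async target must be awaited inside the span, so it needs a coroutine wrapper, while a sync target gets a plain one. Hypothetical, simplified stand-ins for _wrap and _awrap that show why the two cases cannot share a wrapper:

    def _wrap_sketch(tracer, method_conf):
        def wrapper(wrapped, instance, args, kwargs):
            # Simplified: the real wrappers also record attributes and keep
            # the span open for streaming methods instead of ending it here.
            with tracer.start_as_current_span(method_conf["span_name"]):
                return wrapped(*args, **kwargs)
        return wrapper

    def _awrap_sketch(tracer, method_conf):
        async def wrapper(wrapped, instance, args, kwargs):
            with tracer.start_as_current_span(method_conf["span_name"]):
                return await wrapped(*args, **kwargs)  # must await inside the span
        return wrapper

    def pick_wrapper(tracer, method_conf):
        # Mirrors the ternary in _instrument: the flag picks sync vs async.
        return (
            _awrap_sketch(tracer, method_conf)
            if method_conf.get("is_async")
            else _wrap_sketch(tracer, method_conf)
        )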
opentelemetry_instrumentation_vertexai-0.33.8/opentelemetry/instrumentation/vertexai/version.py (ADDED)

@@ -0,0 +1 @@
+__version__ = "0.33.8"

opentelemetry_instrumentation_vertexai-0.33.6/opentelemetry/instrumentation/vertexai/version.py (DELETED)

@@ -1 +0,0 @@
-__version__ = "0.33.6"
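To pick up the fix, upgrade with pip install --upgrade opentelemetry-instrumentation-vertexai==0.33.8; enabling instrumentation is unchanged from the caller's side:

    from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor

    # Patches every WRAPPED_METHODS target; streamed calls now report
    # token usage on their spans as well.
    VertexAIInstrumentor().instrument()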