opentelemetry-instrumentation-vertexai 0.8.0__py3-none-any.whl → 0.9.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of opentelemetry-instrumentation-vertexai might be problematic. Click here for more details.
- opentelemetry/instrumentation/vertexai/__init__.py +370 -0
- opentelemetry/instrumentation/vertexai/version.py +1 -1
- opentelemetry_instrumentation_vertexai-0.9.0.dist-info/METADATA +45 -0
- opentelemetry_instrumentation_vertexai-0.9.0.dist-info/RECORD +5 -0
- opentelemetry_instrumentation_vertexai-0.8.0.dist-info/METADATA +0 -22
- opentelemetry_instrumentation_vertexai-0.8.0.dist-info/RECORD +0 -5
- {opentelemetry_instrumentation_vertexai-0.8.0.dist-info → opentelemetry_instrumentation_vertexai-0.9.0.dist-info}/WHEEL +0 -0
|
@@ -1 +1,371 @@
|
|
|
1
1
|
"""OpenTelemetry Vertex AI instrumentation"""
|
|
2
|
+
import logging
|
|
3
|
+
import os
|
|
4
|
+
import types
|
|
5
|
+
from typing import Collection
|
|
6
|
+
from wrapt import wrap_function_wrapper
|
|
7
|
+
|
|
8
|
+
from opentelemetry import context as context_api
|
|
9
|
+
from opentelemetry.trace import get_tracer, SpanKind
|
|
10
|
+
from opentelemetry.trace.status import Status, StatusCode
|
|
11
|
+
|
|
12
|
+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
|
13
|
+
from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap
|
|
14
|
+
|
|
15
|
+
from opentelemetry.semconv.ai import SpanAttributes, LLMRequestTypeValues
|
|
16
|
+
from opentelemetry.instrumentation.vertexai.version import __version__
|
|
17
|
+
|
|
18
|
+
logger = logging.getLogger(__name__)

# Packages that must be installed for this instrumentation to activate.
_instruments = ("google-cloud-aiplatform >= 1.38.1",)

# Name of the most recently constructed model; the wrappers update this when
# a model is created via ``from_pretrained`` / ``__init__``.
llm_model = "unknown"

# Every (package, object, method, span name) combination that gets traced.
WRAPPED_METHODS = [
    {
        "package": package,
        "object": obj,
        "method": method,
        "span_name": span_name,
    }
    for package, obj, method, span_name in (
        ("vertexai.preview.generative_models", "GenerativeModel", "__init__", "vertexai.__init__"),
        ("vertexai.preview.generative_models", "GenerativeModel", "generate_content", "vertexai.generate_content"),
        ("vertexai.language_models", "TextGenerationModel", "from_pretrained", "vertexai.from_pretrained"),
        ("vertexai.language_models", "TextGenerationModel", "predict", "vertexai.predict"),
        ("vertexai.language_models", "TextGenerationModel", "predict_async", "vertexai.predict"),
        ("vertexai.language_models", "TextGenerationModel", "predict_streaming", "vertexai.predict"),
        ("vertexai.language_models", "TextGenerationModel", "predict_streaming_async", "vertexai.predict"),
        ("vertexai.language_models", "ChatModel", "from_pretrained", "vertexai.from_pretrained"),
        ("vertexai.language_models", "ChatSession", "send_message", "vertexai.send_message"),
        ("vertexai.language_models", "ChatSession", "send_message_streaming", "vertexai.send_message"),
    )
]
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def should_send_prompts():
    """Whether prompt/completion content may be recorded on spans.

    Enabled when the ``TRACELOOP_TRACE_CONTENT`` environment variable is
    "true" (the default when unset or empty), or when the
    ``override_enable_content_tracing`` context key is set.
    """
    env_flag = (os.getenv("TRACELOOP_TRACE_CONTENT") or "true").lower()
    return env_flag == "true" or context_api.get_value(
        "override_enable_content_tracing"
    )
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def is_streaming_response(response):
    """Return True when *response* is a synchronous generator (a stream)."""
    return isinstance(response, types.GeneratorType)
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def is_async_streaming_response(response):
    """Return True when *response* is an asynchronous generator (a stream)."""
    return isinstance(response, types.AsyncGeneratorType)
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def _set_span_attribute(span, name, value):
    """Attach *value* to *span* under *name*, skipping None and empty strings."""
    if value is None or value == "":
        return
    span.set_attribute(name, value)
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def _set_input_attributes(span, args, kwargs):
    """Record prompt content (when allowed) and request parameters on *span*."""
    if should_send_prompts() and args:
        # Positional args may be raw strings or lists of parts; flatten them
        # into one prompt string with each part newline-terminated.
        pieces = []
        for arg in args:
            if isinstance(arg, str):
                pieces.append(f"{arg}\n")
            elif isinstance(arg, list):
                pieces.extend(f"{part}\n" for part in arg)

        _set_span_attribute(
            span,
            f"{SpanAttributes.LLM_PROMPTS}.0.user",
            "".join(pieces),
        )

    _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, llm_model)
    # A non-empty keyword ``prompt`` overwrites the positional prompt above.
    _set_span_attribute(
        span, f"{SpanAttributes.LLM_PROMPTS}.0.user", kwargs.get("prompt")
    )
    for attribute, kwarg in (
        (SpanAttributes.LLM_TEMPERATURE, "temperature"),
        (SpanAttributes.LLM_REQUEST_MAX_TOKENS, "max_output_tokens"),
        (SpanAttributes.LLM_TOP_P, "top_p"),
        (SpanAttributes.LLM_TOP_K, "top_k"),
        (SpanAttributes.LLM_PRESENCE_PENALTY, "presence_penalty"),
        (SpanAttributes.LLM_FREQUENCY_PENALTY, "frequency_penalty"),
    ):
        _set_span_attribute(span, attribute, kwargs.get(kwarg))
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def _set_response_attributes(span, response):
    """Record completion text and token usage from *response* on *span*."""
    _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, llm_model)

    if hasattr(response, "text"):
        # SDK response objects carry usage metadata on the raw gRPC response.
        raw = getattr(response, "_raw_response", None)
        if hasattr(raw, "usage_metadata"):
            usage = raw.usage_metadata
            _set_span_attribute(
                span,
                SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
                usage.total_token_count,
            )
            _set_span_attribute(
                span,
                SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
                usage.candidates_token_count,
            )
            _set_span_attribute(
                span,
                SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
                usage.prompt_token_count,
            )

        if isinstance(response.text, list):
            # NOTE: iterates the response object itself (one candidate per
            # item, each with its own ``.text``), not ``response.text``.
            for index, candidate in enumerate(response):
                _set_span_attribute(
                    span,
                    f"{SpanAttributes.LLM_COMPLETIONS}.{index}.content",
                    candidate.text,
                )
        elif isinstance(response.text, str):
            _set_span_attribute(
                span, f"{SpanAttributes.LLM_COMPLETIONS}.0.content", response.text
            )
    elif isinstance(response, list):
        # Plain list of completion strings (e.g. accumulated stream text).
        for index, candidate in enumerate(response):
            _set_span_attribute(
                span, f"{SpanAttributes.LLM_COMPLETIONS}.{index}.content", candidate
            )
    elif isinstance(response, str):
        _set_span_attribute(
            span, f"{SpanAttributes.LLM_COMPLETIONS}.0.content", response
        )
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
def _build_from_streaming_response(span, response):
    """Re-yield streamed chunks while accumulating the full completion text.

    When the stream is exhausted, records response attributes, marks the
    span OK, and ends it.
    """
    accumulated = []
    for chunk in response:
        accumulated.append(str(chunk.text))
        yield chunk

    _set_response_attributes(span, "".join(accumulated))

    span.set_status(Status(StatusCode.OK))
    span.end()
|
|
201
|
+
|
|
202
|
+
|
|
203
|
+
async def _abuild_from_streaming_response(span, response):
    """Async twin of ``_build_from_streaming_response``.

    Re-yields streamed chunks while accumulating the full completion text;
    once exhausted, records response attributes, marks the span OK, and
    ends it.
    """
    accumulated = []
    async for chunk in response:
        accumulated.append(str(chunk.text))
        yield chunk

    _set_response_attributes(span, "".join(accumulated))

    span.set_status(Status(StatusCode.OK))
    span.end()
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
def _handle_request(span, args, kwargs):
    """Best-effort: copy request data onto *span*; errors are logged, never raised."""
    try:
        if not span.is_recording():
            return
        _set_input_attributes(span, args, kwargs)
    except Exception as ex:  # pylint: disable=broad-except
        logger.warning(
            "Failed to set input attributes for VertexAI span, error: %s", str(ex)
        )
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
def _handle_response(span, response):
    """Best-effort: copy response data onto *span*; errors are logged, never raised."""
    try:
        if span.is_recording():
            _set_response_attributes(span, response)
    except Exception as ex:  # pylint: disable=broad-except
        logger.warning(
            "Failed to set response attributes for VertexAI span, error: %s",
            str(ex),
        )
    # The span is marked OK even when attribute extraction failed above:
    # the underlying call itself succeeded.
    if span.is_recording():
        span.set_status(Status(StatusCode.OK))
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
def _with_tracer_wrapper(func):
    """Turn ``func(tracer, to_wrap, wrapped, instance, args, kwargs)`` into a
    two-stage factory.

    The decorated name, called as ``decorated(tracer, to_wrap)``, returns a
    wrapt-style wrapper ``(wrapped, instance, args, kwargs)`` that forwards
    everything to *func* with the tracer and method descriptor bound.
    """

    def tracer_binder(tracer, to_wrap):
        def bound_wrapper(wrapped, instance, args, kwargs):
            return func(tracer, to_wrap, wrapped, instance, args, kwargs)

        return bound_wrapper

    return tracer_binder
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
@_with_tracer_wrapper
async def _awrap(tracer, to_wrap, wrapped, instance, args, kwargs):
    """Async wrapper: trace one call to a method described by *to_wrap*."""
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return await wrapped(*args, **kwargs)

    global llm_model

    # Model constructors/loaders only record the model name; no span.
    if to_wrap.get("method") in ("from_pretrained", "__init__") and args:
        llm_model = args[0]
        return await wrapped(*args, **kwargs)

    span = tracer.start_span(
        to_wrap.get("span_name"),
        kind=SpanKind.CLIENT,
        attributes={
            SpanAttributes.LLM_VENDOR: "VertexAI",
            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
        },
    )

    _handle_request(span, args, kwargs)

    response = await wrapped(*args, **kwargs)

    if response:
        # Streaming responses end the span themselves once exhausted.
        if is_streaming_response(response):
            return _build_from_streaming_response(span, response)
        if is_async_streaming_response(response):
            return _abuild_from_streaming_response(span, response)
        _handle_response(span, response)

    span.end()
    return response
|
|
297
|
+
|
|
298
|
+
|
|
299
|
+
@_with_tracer_wrapper
def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
    """Sync wrapper: trace one call to a method described by *to_wrap*."""
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    global llm_model

    # Model constructors/loaders only record the model name; no span.
    if to_wrap.get("method") in ("from_pretrained", "__init__") and args:
        llm_model = args[0]
        return wrapped(*args, **kwargs)

    span = tracer.start_span(
        to_wrap.get("span_name"),
        kind=SpanKind.CLIENT,
        attributes={
            SpanAttributes.LLM_VENDOR: "VertexAI",
            SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
        },
    )

    _handle_request(span, args, kwargs)

    response = wrapped(*args, **kwargs)

    if response:
        # Streaming responses end the span themselves once exhausted.
        if is_streaming_response(response):
            return _build_from_streaming_response(span, response)
        if is_async_streaming_response(response):
            return _abuild_from_streaming_response(span, response)
        _handle_response(span, response)

    span.end()
    return response
|
|
342
|
+
|
|
343
|
+
|
|
344
|
+
class VertexAIInstrumentor(BaseInstrumentor):
    """An instrumentor for VertexAI's client library."""

    def instrumentation_dependencies(self) -> Collection[str]:
        """Packages (with version constraints) this instrumentation requires."""
        return _instruments

    def _instrument(self, **kwargs):
        """Install tracing wrappers around every entry in WRAPPED_METHODS."""
        tracer = get_tracer(__name__, __version__, kwargs.get("tracer_provider"))
        for wrapped_method in WRAPPED_METHODS:
            package = wrapped_method.get("package")
            obj = wrapped_method.get("object")
            method = wrapped_method.get("method")

            # ``predict_async`` is a coroutine method and needs the awaiting
            # wrapper; everything else goes through the sync wrapper.
            factory = _awrap if method == 'predict_async' else _wrap
            wrap_function_wrapper(
                package,
                f"{obj}.{method}",
                factory(tracer, wrapped_method),
            )

    def _uninstrument(self, **kwargs):
        """Remove every wrapper installed by ``_instrument``."""
        for wrapped_method in WRAPPED_METHODS:
            target = f"{wrapped_method.get('package')}.{wrapped_method.get('object')}"
            unwrap(target, wrapped_method.get("method", ""))
|
|
@@ -1 +1 @@
|
|
|
1
|
-
__version__ = "0.
|
|
1
|
+
__version__ = "0.9.0"
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: opentelemetry-instrumentation-vertexai
|
|
3
|
+
Version: 0.9.0
|
|
4
|
+
Summary: OpenTelemetry Vertex AI instrumentation
|
|
5
|
+
License: Apache-2.0
|
|
6
|
+
Author: Gal Kleinman
|
|
7
|
+
Author-email: gal@traceloop.com
|
|
8
|
+
Requires-Python: >=3.8.1,<4
|
|
9
|
+
Classifier: License :: OSI Approved :: Apache Software License
|
|
10
|
+
Classifier: Programming Language :: Python :: 3
|
|
11
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
15
|
+
Requires-Dist: opentelemetry-api (>=1.21.0,<2.0.0)
|
|
16
|
+
Requires-Dist: opentelemetry-instrumentation (>=0.42b0,<0.43)
|
|
17
|
+
Requires-Dist: opentelemetry-semantic-conventions-ai (>=0.0.13,<0.0.14)
|
|
18
|
+
Description-Content-Type: text/markdown
|
|
19
|
+
|
|
20
|
+
# OpenTelemetry VertexAI Instrumentation
|
|
21
|
+
|
|
22
|
+
<a href="https://pypi.org/project/opentelemetry-instrumentation-vertexai/">
|
|
23
|
+
<img src="https://badge.fury.io/py/opentelemetry-instrumentation-vertexai.svg">
|
|
24
|
+
</a>
|
|
25
|
+
|
|
26
|
+
This library allows tracing VertexAI prompts and completions sent with the official [VertexAI library](https://github.com/googleapis/python-aiplatform).
|
|
27
|
+
|
|
28
|
+
## Installation
|
|
29
|
+
|
|
30
|
+
```bash
|
|
31
|
+
pip install opentelemetry-instrumentation-vertexai
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
## Privacy
|
|
35
|
+
|
|
36
|
+
**By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you a clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
|
|
37
|
+
|
|
38
|
+
However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
|
|
39
|
+
|
|
40
|
+
To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
|
|
41
|
+
|
|
42
|
+
```bash
|
|
43
|
+
TRACELOOP_TRACE_CONTENT=false
|
|
44
|
+
```
|
|
45
|
+
|
|
@@ -0,0 +1,5 @@
|
|
|
1
|
+
opentelemetry/instrumentation/vertexai/__init__.py,sha256=t-FnVuLBmzfINR5R3Dj5XJ9-VZI-KNHJomJ6q5Dlv6I,11457
|
|
2
|
+
opentelemetry/instrumentation/vertexai/version.py,sha256=H9NWRZb7NbeRRPLP_V1fARmLNXranorVM-OOY-8_2ug,22
|
|
3
|
+
opentelemetry_instrumentation_vertexai-0.9.0.dist-info/METADATA,sha256=cBG2sj9MRZPV5LXB_3iRZfIqa-xCPKuhddgVtsYJn6M,1792
|
|
4
|
+
opentelemetry_instrumentation_vertexai-0.9.0.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
|
|
5
|
+
opentelemetry_instrumentation_vertexai-0.9.0.dist-info/RECORD,,
|
|
@@ -1,22 +0,0 @@
|
|
|
1
|
-
Metadata-Version: 2.1
|
|
2
|
-
Name: opentelemetry-instrumentation-vertexai
|
|
3
|
-
Version: 0.8.0
|
|
4
|
-
Summary: OpenTelemetry Vertex AI instrumentation
|
|
5
|
-
License: Apache-2.0
|
|
6
|
-
Author: Gal Kleinman
|
|
7
|
-
Author-email: gal@traceloop.com
|
|
8
|
-
Requires-Python: >=3.8.1,<3.12
|
|
9
|
-
Classifier: License :: OSI Approved :: Apache Software License
|
|
10
|
-
Classifier: Programming Language :: Python :: 3
|
|
11
|
-
Classifier: Programming Language :: Python :: 3.9
|
|
12
|
-
Classifier: Programming Language :: Python :: 3.10
|
|
13
|
-
Classifier: Programming Language :: Python :: 3.11
|
|
14
|
-
Requires-Dist: opentelemetry-api (>=1.21.0,<2.0.0)
|
|
15
|
-
Requires-Dist: opentelemetry-instrumentation (>=0.42b0,<0.43)
|
|
16
|
-
Requires-Dist: opentelemetry-semantic-conventions-ai (>=0.0.13,<0.0.14)
|
|
17
|
-
Description-Content-Type: text/markdown
|
|
18
|
-
|
|
19
|
-
# opentelemetry-instrumentation-vertexai
|
|
20
|
-
|
|
21
|
-
Project description here.
|
|
22
|
-
|
|
@@ -1,5 +0,0 @@
|
|
|
1
|
-
opentelemetry/instrumentation/vertexai/__init__.py,sha256=F1AyeLIRP4Q0BmTrmk-ZdpIm6P-FIiVU5Wv3Keauks0,46
|
|
2
|
-
opentelemetry/instrumentation/vertexai/version.py,sha256=iPlYCcIzuzW7T2HKDkmYlMkRI51dBLfNRxPPiWrfw9U,22
|
|
3
|
-
opentelemetry_instrumentation_vertexai-0.8.0.dist-info/METADATA,sha256=dn_NlcKt2HvC99JHhjX9iGjPNn8FMPBA0JI73buvtbg,793
|
|
4
|
-
opentelemetry_instrumentation_vertexai-0.8.0.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
|
|
5
|
-
opentelemetry_instrumentation_vertexai-0.8.0.dist-info/RECORD,,
|