opentelemetry-instrumentation-vertexai 0.47.3__py3-none-any.whl → 2.1b0__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry, and reflects the changes between those versions as they appear in their public registries. It is provided for informational purposes only.

Potentially problematic release: this version of opentelemetry-instrumentation-vertexai has been flagged for review.

opentelemetry/instrumentation/vertexai/utils.py
@@ -1,43 +1,459 @@
+ # Copyright The OpenTelemetry Authors
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # type: ignore[reportUnknownDeprecated]
+
+ from __future__ import annotations
+
  import logging
- import os
- import traceback
+ import re
+ from dataclasses import dataclass
+ from os import environ
+ from typing import (
+     TYPE_CHECKING,
+     Iterable,
+     Literal,
+     Mapping,
+     Sequence,
+     Union,
+     cast,
+     overload,
+ )
+ from urllib.parse import urlparse

- from opentelemetry import context as context_api
- from opentelemetry.instrumentation.vertexai.config import Config
+ from google.protobuf import json_format

- TRACELOOP_TRACE_CONTENT = "TRACELOOP_TRACE_CONTENT"
+ from opentelemetry._logs import LogRecord
+ from opentelemetry.instrumentation._semconv import (
+     _StabilityMode,
+ )
+ from opentelemetry.instrumentation.vertexai.events import (
+     ChoiceMessage,
+     ChoiceToolCall,
+     assistant_event,
+     choice_event,
+     system_event,
+     tool_event,
+     user_event,
+ )
+ from opentelemetry.semconv._incubating.attributes import (
+     gen_ai_attributes as GenAIAttributes,
+ )
+ from opentelemetry.semconv.attributes import server_attributes
+ from opentelemetry.util.genai.types import (
+     ContentCapturingMode,
+     FinishReason,
+     MessagePart,
+     Text,
+     ToolCall,
+     ToolCallResponse,
+ )
+ from opentelemetry.util.genai.utils import get_content_capturing_mode
+ from opentelemetry.util.types import AnyValue, AttributeValue

+ if TYPE_CHECKING:
+     from google.cloud.aiplatform_v1.types import (
+         content,
+         prediction_service,
+         tool,
+     )
+     from google.cloud.aiplatform_v1beta1.types import (
+         content as content_v1beta1,
+     )
+     from google.cloud.aiplatform_v1beta1.types import (
+         prediction_service as prediction_service_v1beta1,
+     )
+     from google.cloud.aiplatform_v1beta1.types import (
+         tool as tool_v1beta1,
+     )

- def should_send_prompts():
-     return (
-         os.getenv(TRACELOOP_TRACE_CONTENT) or "true"
-     ).lower() == "true" or context_api.get_value("override_enable_content_tracing")

+ _MODEL = "model"

- def dont_throw(func):
-     """
-     A decorator that wraps the passed in function and logs exceptions instead of throwing them.

-     @param func: The function to wrap
-     @return: The wrapper function
+ @dataclass(frozen=True)
+ class GenerateContentParams:
+     model: str
+     contents: (
+         Sequence[content.Content] | Sequence[content_v1beta1.Content] | None
+     ) = None
+     system_instruction: content.Content | content_v1beta1.Content | None = None
+     tools: Sequence[tool.Tool] | Sequence[tool_v1beta1.Tool] | None = None
+     tool_config: tool.ToolConfig | tool_v1beta1.ToolConfig | None = None
+     labels: Mapping[str, str] | None = None
+     safety_settings: (
+         Sequence[content.SafetySetting]
+         | Sequence[content_v1beta1.SafetySetting]
+         | None
+     ) = None
+     generation_config: (
+         content.GenerationConfig | content_v1beta1.GenerationConfig | None
+     ) = None
+
+
+ def get_server_attributes(
+     endpoint: str,
+ ) -> dict[str, AttributeValue]:
+     """Get server.* attributes from the endpoint, which is a hostname with optional port e.g.
+     - ``us-central1-aiplatform.googleapis.com``
+     - ``us-central1-aiplatform.googleapis.com:5431``
      """
-     # Obtain a logger specific to the function's module
-     logger = logging.getLogger(func.__module__)
+     parsed = urlparse(f"scheme://{endpoint}")
+
+     if not parsed.hostname:
+         return {}
+
+     return {
+         server_attributes.SERVER_ADDRESS: parsed.hostname,
+         server_attributes.SERVER_PORT: parsed.port or 443,
+     }

-     def wrapper(*args, **kwargs):
-         try:
-             return func(*args, **kwargs)
-         except Exception as e:
-             logger.debug(
-                 "OpenLLMetry failed to trace in %s, error: %s",
-                 func.__name__,
-                 traceback.format_exc(),
+
+ def get_genai_request_attributes(  # pylint: disable=too-many-branches
+     use_latest_semconvs: bool,
+     params: GenerateContentParams,
+     operation_name: GenAIAttributes.GenAiOperationNameValues = GenAIAttributes.GenAiOperationNameValues.CHAT,
+ ):
+     model = _get_model_name(params.model)
+     generation_config = params.generation_config
+     attributes: dict[str, AttributeValue] = {
+         GenAIAttributes.GEN_AI_OPERATION_NAME: operation_name.value,
+         GenAIAttributes.GEN_AI_REQUEST_MODEL: model,
+     }
+     if not use_latest_semconvs:
+         attributes[GenAIAttributes.GEN_AI_SYSTEM] = (
+             GenAIAttributes.GenAiSystemValues.VERTEX_AI.value
+         )
+
+     if not generation_config:
+         return attributes
+
+     # Check for optional fields
+     # https://proto-plus-python.readthedocs.io/en/stable/fields.html#optional-fields
+     if "temperature" in generation_config:
+         attributes[GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE] = (
+             generation_config.temperature
+         )
+     if "top_p" in generation_config:
+         # There is also a top_k parameter (the maximum number of tokens to consider when sampling),
+         # but no semconv yet exists for it.
+         attributes[GenAIAttributes.GEN_AI_REQUEST_TOP_P] = (
+             generation_config.top_p
+         )
+     if "max_output_tokens" in generation_config:
+         attributes[GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS] = (
+             generation_config.max_output_tokens
+         )
+     if "presence_penalty" in generation_config:
+         attributes[GenAIAttributes.GEN_AI_REQUEST_PRESENCE_PENALTY] = (
+             generation_config.presence_penalty
+         )
+     if "frequency_penalty" in generation_config:
+         attributes[GenAIAttributes.GEN_AI_REQUEST_FREQUENCY_PENALTY] = (
+             generation_config.frequency_penalty
+         )
+     if "stop_sequences" in generation_config:
+         attributes[GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES] = (
+             generation_config.stop_sequences
+         )
+     if use_latest_semconvs:
+         if "seed" in generation_config:
+             attributes[GenAIAttributes.GEN_AI_REQUEST_SEED] = (
+                 generation_config.seed
+             )
+         if "candidate_count" in generation_config:
+             attributes[GenAIAttributes.GEN_AI_REQUEST_CHOICE_COUNT] = (
+                 generation_config.candidate_count
              )
-             if Config.exception_logger:
-                 Config.exception_logger(e)
+         if "response_mime_type" in generation_config:
+             if generation_config.response_mime_type == "text/plain":
+                 attributes[GenAIAttributes.GEN_AI_OUTPUT_TYPE] = "text"
+             elif generation_config.response_mime_type == "application/json":
+                 attributes[GenAIAttributes.GEN_AI_OUTPUT_TYPE] = "json"
+             else:
+                 attributes[GenAIAttributes.GEN_AI_OUTPUT_TYPE] = (
+                     generation_config.response_mime_type
+                 )
+
+     return attributes
+
+
+ def get_genai_response_attributes(
+     response: prediction_service.GenerateContentResponse
+     | prediction_service_v1beta1.GenerateContentResponse
+     | None,
+ ) -> dict[str, AttributeValue]:
+     if not response:
+         return {}
+     finish_reasons: list[str] = [
+         _map_finish_reason(candidate.finish_reason)
+         for candidate in response.candidates
+     ]
+     return {
+         GenAIAttributes.GEN_AI_RESPONSE_MODEL: response.model_version,
+         GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS: finish_reasons,
+         GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS: response.usage_metadata.prompt_token_count,
+         GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS: response.usage_metadata.candidates_token_count,
+     }
+
+
+ _MODEL_STRIP_RE = re.compile(
+     r"^projects/(.*)/locations/(.*)/publishers/google/models/"
+ )
+
+
+ def _get_model_name(model: str) -> str:
+     return _MODEL_STRIP_RE.sub("", model)
+
+
+ OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT = (
+     "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"
+ )
+
+
+ @overload
+ def is_content_enabled(
+     mode: Literal[_StabilityMode.GEN_AI_LATEST_EXPERIMENTAL],
+ ) -> ContentCapturingMode: ...
+
+
+ @overload
+ def is_content_enabled(mode: Literal[_StabilityMode.DEFAULT]) -> bool: ...
+
+
+ def is_content_enabled(
+     mode: Union[
+         Literal[_StabilityMode.DEFAULT],
+         Literal[_StabilityMode.GEN_AI_LATEST_EXPERIMENTAL],
+     ],
+ ) -> Union[bool, ContentCapturingMode]:
+     if mode == _StabilityMode.DEFAULT:
+         capture_content = environ.get(
+             OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, "false"
+         )
+
+         return capture_content.lower() == "true"
+     return get_content_capturing_mode()
+
+
+ def get_span_name(span_attributes: Mapping[str, AttributeValue]) -> str:
+     name = span_attributes[GenAIAttributes.GEN_AI_OPERATION_NAME]
+     model = span_attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL]
+     if not model:
+         return f"{name}"
+     return f"{name} {model}"
+
+
+ def request_to_events(
+     *, params: GenerateContentParams, capture_content: bool
+ ) -> Iterable[LogRecord]:
+     # System message
+     if params.system_instruction:
+         request_content = _parts_to_any_value(
+             capture_content=capture_content,
+             parts=params.system_instruction.parts,
+         )
+         yield system_event(
+             role=params.system_instruction.role, content=request_content
+         )
+
+     for content in params.contents or []:
+         # Assistant message
+         if content.role == _MODEL:
+             request_content = _parts_to_any_value(
+                 capture_content=capture_content, parts=content.parts
+             )
+
+             yield assistant_event(role=content.role, content=request_content)
+             continue
+
+         # Tool event
+         #
+         # Function call results can be parts inside of a user Content or in a separate Content
+         # entry without a role. That may cause duplication in a user event, see
+         # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3280
+         function_responses = [
+             part.function_response
+             for part in content.parts
+             if "function_response" in part
+         ]
+         for idx, function_response in enumerate(function_responses):
+             yield tool_event(
+                 id_=f"{function_response.name}_{idx}",
+                 role=content.role,
+                 content=json_format.MessageToDict(
+                     function_response._pb.response  # type: ignore[reportUnknownMemberType]
+                 )
+                 if capture_content
+                 else None,
+             )
+
+         if len(function_responses) == len(content.parts):
+             # If the content only contained function responses, don't emit a user event
+             continue
+
+         request_content = _parts_to_any_value(
+             capture_content=capture_content, parts=content.parts
+         )
+         yield user_event(role=content.role, content=request_content)
+
+
+ @dataclass
+ class BlobPart:
+     data: bytes
+     mime_type: str
+     type: Literal["blob"] = "blob"
+
+
+ @dataclass
+ class FileDataPart:
+     mime_type: str
+     uri: str
+     type: Literal["file_data"] = "file_data"
+
+     class Config:
+         extra = "allow"
+
+
+ def convert_content_to_message_parts(
+     content: content.Content | content_v1beta1.Content,
+ ) -> list[MessagePart]:
+     parts: list[MessagePart] = []
+     for idx, part in enumerate(content.parts):
+         if "function_response" in part:
+             part = part.function_response
+             parts.append(
+                 ToolCallResponse(
+                     id=f"{part.name}_{idx}",
+                     response=json_format.MessageToDict(part._pb.response),  # type: ignore[reportUnknownMemberType]
+                 )
+             )
+         elif "function_call" in part:
+             part = part.function_call
+             parts.append(
+                 ToolCall(
+                     id=f"{part.name}_{idx}",
+                     name=part.name,
+                     arguments=json_format.MessageToDict(
+                         part._pb.args,  # type: ignore[reportUnknownMemberType]
+                     ),
+                 )
+             )
+         elif "text" in part:
+             parts.append(Text(content=part.text))
+         elif "inline_data" in part:
+             part = part.inline_data
+             parts.append(
+                 BlobPart(mime_type=part.mime_type or "", data=part.data or b"")
+             )
+         elif "file_data" in part:
+             part = part.file_data
+             parts.append(
+                 FileDataPart(
+                     mime_type=part.mime_type or "", uri=part.file_uri or ""
+                 )
+             )
+         else:
+             logging.warning("Unknown part dropped from telemetry %s", part)
+     return parts
+
+
+ def response_to_events(
+     *,
+     response: prediction_service.GenerateContentResponse
+     | prediction_service_v1beta1.GenerateContentResponse,
+     capture_content: bool,
+ ) -> Iterable[LogRecord]:
+     for candidate in response.candidates:
+         tool_calls = _extract_tool_calls(
+             candidate=candidate, capture_content=capture_content
+         )
+
+         # The original function_call Part is still duplicated in message, see
+         # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3280
+         yield choice_event(
+             finish_reason=_map_finish_reason(candidate.finish_reason),
+             index=candidate.index,
+             # default to "model" since Vertex uses that instead of assistant
+             message=ChoiceMessage(
+                 role=candidate.content.role or _MODEL,
+                 content=_parts_to_any_value(
+                     capture_content=capture_content,
+                     parts=candidate.content.parts,
+                 ),
+             ),
+             tool_calls=tool_calls,
+         )
+
+
+ def _extract_tool_calls(
+     *,
+     candidate: content.Candidate | content_v1beta1.Candidate,
+     capture_content: bool,
+ ) -> Iterable[ChoiceToolCall]:
+     for idx, part in enumerate(candidate.content.parts):
+         if "function_call" not in part:
+             continue
+
+         yield ChoiceToolCall(
+             # Make up an id with index since vertex expects the indices to line up instead of
+             # using ids.
+             id=f"{part.function_call.name}_{idx}",
+             function=ChoiceToolCall.Function(
+                 name=part.function_call.name,
+                 arguments=json_format.MessageToDict(
+                     part.function_call._pb.args  # type: ignore[reportUnknownMemberType]
+                 )
+                 if capture_content
+                 else None,
+             ),
+         )
+
+
+ def _parts_to_any_value(
+     *,
+     capture_content: bool,
+     parts: Sequence[content.Part] | Sequence[content_v1beta1.Part],
+ ) -> list[dict[str, AnyValue]] | None:
+     if not capture_content:
+         return None
+
+     return [
+         cast(
+             "dict[str, AnyValue]",
+             type(part).to_dict(  # type: ignore[reportUnknownMemberType]
+                 part, always_print_fields_with_no_presence=False
+             ),
+         )
+         for part in parts
+     ]

-     return wrapper

+ def _map_finish_reason(
+     finish_reason: content.Candidate.FinishReason
+     | content_v1beta1.Candidate.FinishReason,
+ ) -> FinishReason | str:
+     EnumType = type(finish_reason)  # pylint: disable=invalid-name
+     if (
+         finish_reason is EnumType.FINISH_REASON_UNSPECIFIED
+         or finish_reason is EnumType.OTHER
+     ):
+         return "error"
+     if finish_reason is EnumType.STOP:
+         return "stop"
+     if finish_reason is EnumType.MAX_TOKENS:
+         return "length"

- def should_emit_events():
-     return not Config.use_legacy_attributes
+     # If there is no 1:1 mapping to an OTel preferred enum value, use the exact vertex reason
+     return finish_reason.name
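
Two of the helpers added above can be sanity-checked in isolation. Below is a minimal standalone sketch, not part of the package, of the endpoint parsing in get_server_attributes (the dummy scheme:// prefix is what lets urlparse treat a bare host:port as a network location) and of the _MODEL_STRIP_RE model-name stripping; the project id in the resource name is hypothetical:

    import re
    from urllib.parse import urlparse

    # Same pattern as _MODEL_STRIP_RE in the new utils.py.
    _MODEL_STRIP_RE = re.compile(
        r"^projects/(.*)/locations/(.*)/publishers/google/models/"
    )

    for endpoint in (
        "us-central1-aiplatform.googleapis.com",
        "us-central1-aiplatform.googleapis.com:5431",
    ):
        # Prefixing a dummy scheme makes urlparse populate hostname/port.
        parsed = urlparse(f"scheme://{endpoint}")
        print(parsed.hostname, parsed.port or 443)
    # -> us-central1-aiplatform.googleapis.com 443
    # -> us-central1-aiplatform.googleapis.com 5431

    # The regex reduces a full model resource name to the bare model id.
    full_name = (
        "projects/my-project/locations/us-central1"
        "/publishers/google/models/gemini-1.5-flash-002"
    )
    print(_MODEL_STRIP_RE.sub("", full_name))  # -> gemini-1.5-flash-002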
opentelemetry/instrumentation/vertexai/version.py
@@ -1 +1,15 @@
- __version__ = "0.47.3"
+ # Copyright The OpenTelemetry Authors
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ __version__ = "2.1b0"
opentelemetry_instrumentation_vertexai-2.1b0.dist-info/METADATA
@@ -0,0 +1,106 @@
+ Metadata-Version: 2.4
+ Name: opentelemetry-instrumentation-vertexai
+ Version: 2.1b0
+ Summary: OpenTelemetry Official VertexAI instrumentation
+ Project-URL: Homepage, https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation-genai/opentelemetry-instrumentation-vertexai
+ Project-URL: Repository, https://github.com/open-telemetry/opentelemetry-python-contrib
+ Author-email: OpenTelemetry Authors <cncf-opentelemetry-contributors@lists.cncf.io>
+ License-Expression: Apache-2.0
+ License-File: LICENSE
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Requires-Python: >=3.9
+ Requires-Dist: opentelemetry-api~=1.37
+ Requires-Dist: opentelemetry-instrumentation~=0.58b0
+ Requires-Dist: opentelemetry-semantic-conventions~=0.58b0
+ Requires-Dist: opentelemetry-util-genai<0.3b0,>=0.2b0
+ Provides-Extra: instruments
+ Requires-Dist: google-cloud-aiplatform>=1.64; extra == 'instruments'
+ Description-Content-Type: text/x-rst
+
+ OpenTelemetry VertexAI Instrumentation
+ ======================================
+
+ |pypi|
+
+ .. |pypi| image:: https://badge.fury.io/py/opentelemetry-instrumentation-vertexai.svg
+    :target: https://pypi.org/project/opentelemetry-instrumentation-vertexai/
+
+ This library allows tracing LLM requests and logging of messages made by the
+ `VertexAI Python API library <https://pypi.org/project/google-cloud-aiplatform/>`_.
+
+
+ Installation
+ ------------
+
+ If your application is already instrumented with OpenTelemetry, add this
+ package to your requirements.
+ ::
+
+     pip install opentelemetry-instrumentation-vertexai
+
+ If you don't have a VertexAI application yet, try our `examples <examples>`_.
+
+ Check out the `zero-code example <examples/zero-code>`_ for a quick start.
+
+ Usage
+ -----
+
+ This section describes how to set up VertexAI instrumentation if you're setting OpenTelemetry up manually.
+ Check out the `manual example <examples/manual>`_ for more details.
+
+ Instrumenting all clients
+ *************************
+
+ When using the instrumentor, all clients will automatically trace VertexAI chat completion operations.
+ You can also optionally capture prompts and completions as log events.
+
+ Make sure to configure OpenTelemetry tracing, logging, and events to capture all telemetry emitted by the instrumentation.
+
+ .. code-block:: python
+
+     import vertexai
+     from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor
+     from vertexai.generative_models import GenerativeModel
+
+     VertexAIInstrumentor().instrument()
+
+     vertexai.init()
+     model = GenerativeModel("gemini-1.5-flash-002")
+     response = model.generate_content("Write a short poem on OpenTelemetry.")
+
+ Enabling message content
+ *************************
+
+ Message content such as the contents of the prompt, completion, function arguments and return values
+ is not captured by default. To capture message content as log events, set the environment variable
+ ``OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT`` to ``true``.
+
+ Uninstrument
+ ************
+
+ To uninstrument clients, call the uninstrument method:
+
+ .. code-block:: python
+
+     from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor
+
+     VertexAIInstrumentor().instrument()
+     # ...
+
+     # Uninstrument all clients
+     VertexAIInstrumentor().uninstrument()
+
+ References
+ ----------
+ * `OpenTelemetry VertexAI Instrumentation <https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/vertexai/vertexai.html>`_
+ * `OpenTelemetry Project <https://opentelemetry.io/>`_
+ * `OpenTelemetry Python Examples <https://github.com/open-telemetry/opentelemetry-python/tree/main/docs/examples>`_
+
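
To illustrate the capture switch documented in the README above, here is a minimal sketch. It assumes an otherwise-configured OpenTelemetry logging/event pipeline, and sets the variable from Python purely for demonstration; in practice you would export it in the process environment:

    import os

    # Demonstration only: normally exported in the shell, e.g.
    #   export OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true
    os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = "true"

    from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor

    VertexAIInstrumentor().instrument()
    # Subsequent generate_content calls now record prompts and completions
    # as log events in addition to spans.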
opentelemetry_instrumentation_vertexai-2.1b0.dist-info/RECORD
@@ -0,0 +1,12 @@
+ opentelemetry/instrumentation/vertexai/__init__.py,sha256=h2ikRd_hLXscLwbacyfCY5fdFYH76iMxkHHr8r1AeCU,6010
+ opentelemetry/instrumentation/vertexai/events.py,sha256=EHp1uqJ-vMKJ5jVxRgHNyboN51JUYEPcXcT3yV5rcRE,5274
+ opentelemetry/instrumentation/vertexai/package.py,sha256=CFLAAZb6L_fDNfJgpW-cXjhiQjwGLAuxhdAjMNt3jPM,638
+ opentelemetry/instrumentation/vertexai/patch.py,sha256=_YZpMo14k-ccy8VcNbl9rG7VsztLorcS12an6e8wJx8,13543
+ opentelemetry/instrumentation/vertexai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ opentelemetry/instrumentation/vertexai/utils.py,sha256=CxOypyBGjpuaKX4lijomyGBupiViC_TpYHgiCTR2nGE,15001
+ opentelemetry/instrumentation/vertexai/version.py,sha256=tlQVIB6SsAnJ7uZmREAk0QDaATSCgbgYwdtY4G8GX9M,607
+ opentelemetry_instrumentation_vertexai-2.1b0.dist-info/METADATA,sha256=tXZDreIfz0gB64b5zYAdufFr3WdjRL6hB_1k4ynrrL4,3946
+ opentelemetry_instrumentation_vertexai-2.1b0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ opentelemetry_instrumentation_vertexai-2.1b0.dist-info/entry_points.txt,sha256=aAbxWr7zIDuYms-m-ea5GEV2rqyx7xPT8FWr2umrCmU,100
+ opentelemetry_instrumentation_vertexai-2.1b0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ opentelemetry_instrumentation_vertexai-2.1b0.dist-info/RECORD,,
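
The RECORD hashes above follow the wheel format: each line is path,sha256=<digest>,size, where the digest is the unpadded urlsafe base64 encoding of the file's SHA-256. A small sketch for recomputing one locally (the path is whatever your unpacked wheel contains):

    import base64
    import hashlib

    def record_digest(path: str) -> str:
        """Return the sha256=... token the wheel RECORD uses for this file."""
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

    # Compare against the RECORD line for e.g. the unpacked utils.py.
    print(record_digest("opentelemetry/instrumentation/vertexai/utils.py"))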
opentelemetry_instrumentation_vertexai-2.1b0.dist-info/WHEEL
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: poetry-core 2.2.0
+ Generator: hatchling 1.27.0
  Root-Is-Purelib: true
  Tag: py3-none-any
opentelemetry_instrumentation_vertexai-2.1b0.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+ [opentelemetry_instrumentor]
+ vertexai = opentelemetry.instrumentation.vertexai:VertexAIInstrumentor
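
The entry point registered above is how OpenTelemetry tooling (such as the opentelemetry-instrument command) discovers this instrumentor. A rough sketch of the lookup, using the Python 3.10+ importlib.metadata selection API (the package itself supports >=3.9, where the older dict-style API applies):

    from importlib.metadata import entry_points

    # Find instrumentors advertised under the group declared in entry_points.txt.
    for ep in entry_points(group="opentelemetry_instrumentor"):
        if == "vertexai":
            instrumentor = ep.load()()  # load VertexAIInstrumentor, instantiate it
            instrumentor.instrument()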