opentelemetry-instrumentation-vertexai 0.38.7__py3-none-any.whl → 2.0b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of opentelemetry-instrumentation-vertexai may warrant closer review before upgrading.

@@ -0,0 +1,142 @@
+ # Copyright The OpenTelemetry Authors
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from __future__ import annotations
+
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Callable,
+     MutableSequence,
+ )
+
+ from opentelemetry._events import EventLogger
+ from opentelemetry.instrumentation.vertexai.utils import (
+     GenerateContentParams,
+     get_genai_request_attributes,
+     get_genai_response_attributes,
+     get_server_attributes,
+     get_span_name,
+     request_to_events,
+     response_to_events,
+ )
+ from opentelemetry.trace import SpanKind, Tracer
+
+ if TYPE_CHECKING:
+     from google.cloud.aiplatform_v1.services.prediction_service import client
+     from google.cloud.aiplatform_v1.types import (
+         content,
+         prediction_service,
+     )
+     from google.cloud.aiplatform_v1beta1.services.prediction_service import (
+         client as client_v1beta1,
+     )
+     from google.cloud.aiplatform_v1beta1.types import (
+         content as content_v1beta1,
+     )
+     from google.cloud.aiplatform_v1beta1.types import (
+         prediction_service as prediction_service_v1beta1,
+     )
+
+
+ # Use parameter signature from
+ # https://github.com/googleapis/python-aiplatform/blob/v1.76.0/google/cloud/aiplatform_v1/services/prediction_service/client.py#L2088
+ # to handle named vs positional args robustly
+ def _extract_params(
+     request: prediction_service.GenerateContentRequest
+     | prediction_service_v1beta1.GenerateContentRequest
+     | dict[Any, Any]
+     | None = None,
+     *,
+     model: str | None = None,
+     contents: MutableSequence[content.Content]
+     | MutableSequence[content_v1beta1.Content]
+     | None = None,
+     **_kwargs: Any,
+ ) -> GenerateContentParams:
+     # Request and the named parameters are mutually exclusive or the RPC will fail
+     if not request:
+         return GenerateContentParams(
+             model=model or "",
+             contents=contents,
+         )
+
+     if isinstance(request, dict):
+         return GenerateContentParams(**request)
+
+     return GenerateContentParams(
+         model=request.model,
+         contents=request.contents,
+         system_instruction=request.system_instruction,
+         tools=request.tools,
+         tool_config=request.tool_config,
+         labels=request.labels,
+         safety_settings=request.safety_settings,
+         generation_config=request.generation_config,
+     )
+
+
+ def generate_content_create(
+     tracer: Tracer, event_logger: EventLogger, capture_content: bool
+ ):
+     """Wrap the `generate_content` method of the `GenerativeModel` class to trace it."""
+
+     def traced_method(
+         wrapped: Callable[
+             ...,
+             prediction_service.GenerateContentResponse
+             | prediction_service_v1beta1.GenerateContentResponse,
+         ],
+         instance: client.PredictionServiceClient
+         | client_v1beta1.PredictionServiceClient,
+         args: Any,
+         kwargs: Any,
+     ):
+         params = _extract_params(*args, **kwargs)
+         api_endpoint: str = instance.api_endpoint  # type: ignore[reportUnknownMemberType]
+         span_attributes = {
+             **get_genai_request_attributes(params),
+             **get_server_attributes(api_endpoint),
+         }
+
+         span_name = get_span_name(span_attributes)
+         with tracer.start_as_current_span(
+             name=span_name,
+             kind=SpanKind.CLIENT,
+             attributes=span_attributes,
+         ) as span:
+             for event in request_to_events(
+                 params=params, capture_content=capture_content
+             ):
+                 event_logger.emit(event)
+
+             # TODO: set error.type attribute
+             # https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-spans.md
+             response = wrapped(*args, **kwargs)
+             # TODO: handle streaming
+             # if is_streaming(kwargs):
+             #     return StreamWrapper(
+             #         result, span, event_logger, capture_content
+             #     )
+
+             if span.is_recording():
+                 span.set_attributes(get_genai_response_attributes(response))
+             for event in response_to_events(
+                 response=response, capture_content=capture_content
+             ):
+                 event_logger.emit(event)
+
+             return response
+
+     return traced_method
File without changes
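
Note on the new `patch.py` above: `_extract_params` accepts the same call shapes as `PredictionServiceClient.generate_content` (a `GenerateContentRequest` proto, a plain dict, or keyword-only `model`/`contents`) and normalizes them into one `GenerateContentParams` value before any span attributes are computed. A minimal sketch of that normalization, importing the private helper purely for illustration and using a model name not taken from this diff:

    from opentelemetry.instrumentation.vertexai.patch import _extract_params

    # Keyword-only form: no request object, so the named parameters are used directly.
    params = _extract_params(model="gemini-1.5-flash-002")
    assert params.model == "gemini-1.5-flash-002"
    assert params.contents is None  # unset fields keep their dataclass defaults

    # Dict form: the mapping is splatted straight into GenerateContentParams(**request).
    params = _extract_params({"model": "gemini-1.5-flash-002"})
    assert params.model == "gemini-1.5-flash-002"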
@@ -1,29 +1,344 @@
- import logging
- import traceback
+ # Copyright The OpenTelemetry Authors
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.

- from opentelemetry.instrumentation.vertexai.config import Config
+ from __future__ import annotations

+ import re
+ from dataclasses import dataclass
+ from os import environ
+ from typing import (
+     TYPE_CHECKING,
+     Iterable,
+     Mapping,
+     Sequence,
+     cast,
+ )
+ from urllib.parse import urlparse
+
+ from google.protobuf import json_format
+
+ from opentelemetry._events import Event
+ from opentelemetry.instrumentation.vertexai.events import (
+     ChoiceMessage,
+     ChoiceToolCall,
+     FinishReason,
+     assistant_event,
+     choice_event,
+     system_event,
+     tool_event,
+     user_event,
+ )
+ from opentelemetry.semconv._incubating.attributes import (
+     gen_ai_attributes as GenAIAttributes,
+ )
+ from opentelemetry.semconv.attributes import server_attributes
+ from opentelemetry.util.types import AnyValue, AttributeValue
+
+ if TYPE_CHECKING:
+     from google.cloud.aiplatform_v1.types import (
+         content,
+         prediction_service,
+         tool,
+     )
+     from google.cloud.aiplatform_v1beta1.types import (
+         content as content_v1beta1,
+     )
+     from google.cloud.aiplatform_v1beta1.types import (
+         prediction_service as prediction_service_v1beta1,
+     )
+     from google.cloud.aiplatform_v1beta1.types import (
+         tool as tool_v1beta1,
+     )
+
+
+ _MODEL = "model"

- def dont_throw(func):
-     """
-     A decorator that wraps the passed in function and logs exceptions instead of throwing them.

-     @param func: The function to wrap
-     @return: The wrapper function
+ @dataclass(frozen=True)
+ class GenerateContentParams:
+     model: str
+     contents: (
+         Sequence[content.Content] | Sequence[content_v1beta1.Content] | None
+     ) = None
+     system_instruction: content.Content | content_v1beta1.Content | None = None
+     tools: Sequence[tool.Tool] | Sequence[tool_v1beta1.Tool] | None = None
+     tool_config: tool.ToolConfig | tool_v1beta1.ToolConfig | None = None
+     labels: Mapping[str, str] | None = None
+     safety_settings: (
+         Sequence[content.SafetySetting]
+         | Sequence[content_v1beta1.SafetySetting]
+         | None
+     ) = None
+     generation_config: (
+         content.GenerationConfig | content_v1beta1.GenerationConfig | None
+     ) = None
+
+
+ def get_server_attributes(
+     endpoint: str,
+ ) -> dict[str, AttributeValue]:
+     """Get server.* attributes from the endpoint, which is a hostname with optional port e.g.
+     - ``us-central1-aiplatform.googleapis.com``
+     - ``us-central1-aiplatform.googleapis.com:5431``
      """
-     # Obtain a logger specific to the function's module
-     logger = logging.getLogger(func.__module__)
-
-     def wrapper(*args, **kwargs):
-         try:
-             return func(*args, **kwargs)
-         except Exception as e:
-             logger.debug(
-                 "OpenLLMetry failed to trace in %s, error: %s",
-                 func.__name__,
-                 traceback.format_exc(),
+     parsed = urlparse(f"scheme://{endpoint}")
+
+     if not parsed.hostname:
+         return {}
+
+     return {
+         server_attributes.SERVER_ADDRESS: parsed.hostname,
+         server_attributes.SERVER_PORT: parsed.port or 443,
+     }
+
+
+ def get_genai_request_attributes(
+     params: GenerateContentParams,
+     operation_name: GenAIAttributes.GenAiOperationNameValues = GenAIAttributes.GenAiOperationNameValues.CHAT,
+ ):
+     model = _get_model_name(params.model)
+     generation_config = params.generation_config
+     attributes: dict[str, AttributeValue] = {
+         GenAIAttributes.GEN_AI_OPERATION_NAME: operation_name.value,
+         GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.VERTEX_AI.value,
+         GenAIAttributes.GEN_AI_REQUEST_MODEL: model,
+     }
+
+     if not generation_config:
+         return attributes
+
+     # Check for optional fields
+     # https://proto-plus-python.readthedocs.io/en/stable/fields.html#optional-fields
+     if "temperature" in generation_config:
+         attributes[GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE] = (
+             generation_config.temperature
+         )
+     if "top_p" in generation_config:
+         attributes[GenAIAttributes.GEN_AI_REQUEST_TOP_P] = (
+             generation_config.top_p
+         )
+     if "max_output_tokens" in generation_config:
+         attributes[GenAIAttributes.GEN_AI_REQUEST_MAX_TOKENS] = (
+             generation_config.max_output_tokens
+         )
+     if "presence_penalty" in generation_config:
+         attributes[GenAIAttributes.GEN_AI_REQUEST_PRESENCE_PENALTY] = (
+             generation_config.presence_penalty
+         )
+     if "frequency_penalty" in generation_config:
+         attributes[GenAIAttributes.GEN_AI_REQUEST_FREQUENCY_PENALTY] = (
+             generation_config.frequency_penalty
+         )
+     # Uncomment once GEN_AI_REQUEST_SEED is released in 1.30
+     # https://github.com/open-telemetry/semantic-conventions/pull/1710
+     # if "seed" in generation_config:
+     #     attributes[GenAIAttributes.GEN_AI_REQUEST_SEED] = (
+     #         generation_config.seed
+     #     )
+     if "stop_sequences" in generation_config:
+         attributes[GenAIAttributes.GEN_AI_REQUEST_STOP_SEQUENCES] = (
+             generation_config.stop_sequences
+         )
+
+     return attributes
+
+
+ def get_genai_response_attributes(
+     response: prediction_service.GenerateContentResponse
+     | prediction_service_v1beta1.GenerateContentResponse,
+ ) -> dict[str, AttributeValue]:
+     finish_reasons: list[str] = [
+         _map_finish_reason(candidate.finish_reason)
+         for candidate in response.candidates
+     ]
+     # TODO: add gen_ai.response.id once available in the python client
+     # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3246
+     return {
+         GenAIAttributes.GEN_AI_RESPONSE_MODEL: response.model_version,
+         GenAIAttributes.GEN_AI_RESPONSE_FINISH_REASONS: finish_reasons,
+         GenAIAttributes.GEN_AI_USAGE_INPUT_TOKENS: response.usage_metadata.prompt_token_count,
+         GenAIAttributes.GEN_AI_USAGE_OUTPUT_TOKENS: response.usage_metadata.candidates_token_count,
+     }
+
+
+ _MODEL_STRIP_RE = re.compile(
+     r"^projects/(.*)/locations/(.*)/publishers/google/models/"
+ )
+
+
+ def _get_model_name(model: str) -> str:
+     return _MODEL_STRIP_RE.sub("", model)
+
+
+ OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT = (
+     "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"
+ )
+
+
+ def is_content_enabled() -> bool:
+     capture_content = environ.get(
+         OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, "false"
+     )
+
+     return capture_content.lower() == "true"
+
+
+ def get_span_name(span_attributes: Mapping[str, AttributeValue]) -> str:
+     name = span_attributes[GenAIAttributes.GEN_AI_OPERATION_NAME]
+     model = span_attributes[GenAIAttributes.GEN_AI_REQUEST_MODEL]
+     if not model:
+         return f"{name}"
+     return f"{name} {model}"
+
+
+ def request_to_events(
+     *, params: GenerateContentParams, capture_content: bool
+ ) -> Iterable[Event]:
+     # System message
+     if params.system_instruction:
+         request_content = _parts_to_any_value(
+             capture_content=capture_content,
+             parts=params.system_instruction.parts,
+         )
+         yield system_event(
+             role=params.system_instruction.role, content=request_content
+         )
+
+     for content in params.contents or []:
+         # Assistant message
+         if content.role == _MODEL:
+             request_content = _parts_to_any_value(
+                 capture_content=capture_content, parts=content.parts
+             )
+
+             yield assistant_event(role=content.role, content=request_content)
+             continue
+
+         # Tool event
+         #
+         # Function call results can be parts inside of a user Content or in a separate Content
+         # entry without a role. That may cause duplication in a user event, see
+         # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3280
+         function_responses = [
+             part.function_response
+             for part in content.parts
+             if "function_response" in part
+         ]
+         for idx, function_response in enumerate(function_responses):
+             yield tool_event(
+                 id_=f"{function_response.name}_{idx}",
+                 role=content.role,
+                 content=json_format.MessageToDict(
+                     function_response._pb.response  # type: ignore[reportUnknownMemberType]
+                 )
+                 if capture_content
+                 else None,
              )
-             if Config.exception_logger:
-                 Config.exception_logger(e)

-     return wrapper
+         if len(function_responses) == len(content.parts):
+             # If the content only contained function responses, don't emit a user event
+             continue
+
+         request_content = _parts_to_any_value(
+             capture_content=capture_content, parts=content.parts
+         )
+         yield user_event(role=content.role, content=request_content)
+
+
+ def response_to_events(
+     *,
+     response: prediction_service.GenerateContentResponse
+     | prediction_service_v1beta1.GenerateContentResponse,
+     capture_content: bool,
+ ) -> Iterable[Event]:
+     for candidate in response.candidates:
+         tool_calls = _extract_tool_calls(
+             candidate=candidate, capture_content=capture_content
+         )
+
+         # The original function_call Part is still duplicated in message, see
+         # https://github.com/open-telemetry/opentelemetry-python-contrib/issues/3280
+         yield choice_event(
+             finish_reason=_map_finish_reason(candidate.finish_reason),
+             index=candidate.index,
+             # default to "model" since Vertex uses that instead of assistant
+             message=ChoiceMessage(
+                 role=candidate.content.role or _MODEL,
+                 content=_parts_to_any_value(
+                     capture_content=capture_content,
+                     parts=candidate.content.parts,
+                 ),
+             ),
+             tool_calls=tool_calls,
+         )
+
+
+ def _extract_tool_calls(
+     *,
+     candidate: content.Candidate | content_v1beta1.Candidate,
+     capture_content: bool,
+ ) -> Iterable[ChoiceToolCall]:
+     for idx, part in enumerate(candidate.content.parts):
+         if "function_call" not in part:
+             continue
+
+         yield ChoiceToolCall(
+             # Make up an id with index since vertex expects the indices to line up instead of
+             # using ids.
+             id=f"{part.function_call.name}_{idx}",
+             function=ChoiceToolCall.Function(
+                 name=part.function_call.name,
+                 arguments=json_format.MessageToDict(
+                     part.function_call._pb.args  # type: ignore[reportUnknownMemberType]
+                 )
+                 if capture_content
+                 else None,
+             ),
+         )
+
+
+ def _parts_to_any_value(
+     *,
+     capture_content: bool,
+     parts: Sequence[content.Part] | Sequence[content_v1beta1.Part],
+ ) -> list[dict[str, AnyValue]] | None:
+     if not capture_content:
+         return None
+
+     return [
+         cast(
+             "dict[str, AnyValue]",
+             type(part).to_dict(part, including_default_value_fields=False),  # type: ignore[reportUnknownMemberType]
+         )
+         for part in parts
+     ]
+
+
+ def _map_finish_reason(
+     finish_reason: content.Candidate.FinishReason
+     | content_v1beta1.Candidate.FinishReason,
+ ) -> FinishReason | str:
+     EnumType = type(finish_reason)  # pylint: disable=invalid-name
+     if (
+         finish_reason is EnumType.FINISH_REASON_UNSPECIFIED
+         or finish_reason is EnumType.OTHER
+     ):
+         return "error"
+     if finish_reason is EnumType.STOP:
+         return "stop"
+     if finish_reason is EnumType.MAX_TOKENS:
+         return "length"
+
+     # If there is no 1:1 mapping to an OTel preferred enum value, use the exact vertex reason
+     return finish_reason.name
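
Most helpers in the new `utils.py` are pure functions, so their behavior can be read directly off the diff. A quick sketch of the endpoint and model-name handling; the project and location values below are illustrative, not taken from this release:

    from opentelemetry.instrumentation.vertexai.utils import (
        _get_model_name,
        get_server_attributes,
    )

    # urlparse() needs a scheme, hence the "scheme://" prefix; the port defaults to 443.
    assert get_server_attributes("us-central1-aiplatform.googleapis.com") == {
        "server.address": "us-central1-aiplatform.googleapis.com",
        "server.port": 443,
    }
    assert get_server_attributes("us-central1-aiplatform.googleapis.com:5431") == {
        "server.address": "us-central1-aiplatform.googleapis.com",
        "server.port": 5431,
    }

    # _MODEL_STRIP_RE removes the fully qualified resource prefix, leaving the bare model id.
    assert (
        _get_model_name(
            "projects/my-project/locations/us-central1/publishers/google/models/gemini-1.5-flash-002"
        )
        == "gemini-1.5-flash-002"
    )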
@@ -1 +1,15 @@
- __version__ = "0.38.7"
+ # Copyright The OpenTelemetry Authors
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ __version__ = "2.0b0"
@@ -0,0 +1,106 @@
+ Metadata-Version: 2.4
+ Name: opentelemetry-instrumentation-vertexai
+ Version: 2.0b0
+ Summary: OpenTelemetry Official VertexAI instrumentation
+ Project-URL: Homepage, https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation-genai/opentelemetry-instrumentation-vertexai
+ Project-URL: Repository, https://github.com/open-telemetry/opentelemetry-python-contrib
+ Author-email: OpenTelemetry Authors <cncf-opentelemetry-contributors@lists.cncf.io>
+ License-Expression: Apache-2.0
+ License-File: LICENSE
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Requires-Python: >=3.8
+ Requires-Dist: opentelemetry-api~=1.28
+ Requires-Dist: opentelemetry-instrumentation~=0.49b0
+ Requires-Dist: opentelemetry-semantic-conventions~=0.49b0
+ Provides-Extra: instruments
+ Requires-Dist: google-cloud-aiplatform>=1.64; extra == 'instruments'
+ Description-Content-Type: text/x-rst
+
+ OpenTelemetry VertexAI Instrumentation
+ ======================================
+
+ |pypi|
+
+ .. |pypi| image:: https://badge.fury.io/py/opentelemetry-instrumentation-vertexai.svg
+    :target: https://pypi.org/project/opentelemetry-instrumentation-vertexai/
+
+ This library allows tracing LLM requests and logging of messages made by the
+ `VertexAI Python API library <https://pypi.org/project/google-cloud-aiplatform/>`_.
+
+
+ Installation
+ ------------
+
+ If your application is already instrumented with OpenTelemetry, add this
+ package to your requirements.
+ ::
+
+     pip install opentelemetry-instrumentation-vertexai
+
+ If you don't have a VertexAI application yet, try our `examples <examples>`_.
+
+ Check out the `zero-code example <examples/zero-code>`_ for a quick start.
+
+ Usage
+ -----
+
+ This section describes how to set up VertexAI instrumentation if you're setting OpenTelemetry up manually.
+ Check out the `manual example <examples/manual>`_ for more details.
+
+ Instrumenting all clients
+ *************************
+
+ When using the instrumentor, all clients will automatically trace VertexAI chat completion operations.
+ You can also optionally capture prompts and completions as log events.
+
+ Make sure to configure OpenTelemetry tracing, logging, and events to capture all telemetry emitted by the instrumentation.
+
+ .. code-block:: python
+
+     import vertexai
+     from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor
+     from vertexai.generative_models import GenerativeModel
+
+     VertexAIInstrumentor().instrument()
+
+     vertexai.init()
+     model = GenerativeModel("gemini-1.5-flash-002")
+     response = model.generate_content("Write a short poem on OpenTelemetry.")
+
+ Enabling message content
+ *************************
+
+ Message content such as the contents of the prompt, completion, function arguments and return values
+ is not captured by default. To capture message content as log events, set the environment variable
+ `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT` to `true`.
+
+ Uninstrument
+ ************
+
+ To uninstrument clients, call the uninstrument method:
+
+ .. code-block:: python
+
+     from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor
+
+     VertexAIInstrumentor().instrument()
+     # ...
+
+     # Uninstrument all clients
+     VertexAIInstrumentor().uninstrument()
+
+ References
+ ----------
+ * `OpenTelemetry VertexAI Instrumentation <https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/vertexai/vertexai.html>`_
+ * `OpenTelemetry Project <https://opentelemetry.io/>`_
+ * `OpenTelemetry Python Examples <https://github.com/open-telemetry/opentelemetry-python/tree/main/docs/examples>`_
+
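
Tying the README back to the code: `is_content_enabled()` in the new `utils.py` reads `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT` and treats only a case-insensitive `"true"` as opt-in. A minimal sketch of enabling capture, assuming the variable is set before `instrument()` consults it:

    import os

    # Must be in the environment before VertexAIInstrumentor().instrument() runs.
    os.environ["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = "true"

    from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor

    VertexAIInstrumentor().instrument()  # prompts and completions now emitted as log events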
@@ -0,0 +1,12 @@
+ opentelemetry/instrumentation/vertexai/__init__.py,sha256=DrASu5cA6RjJU9fX6z-T4Oi_DfG5bmZi49mX4D7k_us,3188
+ opentelemetry/instrumentation/vertexai/events.py,sha256=0PlFioS1I_hnvelEwFAOMxwiLBQqQpq9ADSZa8yxF_c,5161
+ opentelemetry/instrumentation/vertexai/package.py,sha256=CFLAAZb6L_fDNfJgpW-cXjhiQjwGLAuxhdAjMNt3jPM,638
+ opentelemetry/instrumentation/vertexai/patch.py,sha256=UIgMrsT4Qhr6bFRUn2vca_ntQgR7MGUMRlVZEsgMvDA,4769
+ opentelemetry/instrumentation/vertexai/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ opentelemetry/instrumentation/vertexai/utils.py,sha256=CJwxZIH-9CmnCcbc9TXsG9W59NaJdU_cHDVTdK-6jCU,11546
+ opentelemetry/instrumentation/vertexai/version.py,sha256=3DvzQveBD-YdMIJDP5YIVXzqInnizLBgk8mSkEdl7CA,607
+ opentelemetry_instrumentation_vertexai-2.0b0.dist-info/METADATA,sha256=G6fPgJcQPWVvSTSeD1oC-ssN6V7_Rql_30B1Q3BaW3I,3942
+ opentelemetry_instrumentation_vertexai-2.0b0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ opentelemetry_instrumentation_vertexai-2.0b0.dist-info/entry_points.txt,sha256=aAbxWr7zIDuYms-m-ea5GEV2rqyx7xPT8FWr2umrCmU,100
+ opentelemetry_instrumentation_vertexai-2.0b0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ opentelemetry_instrumentation_vertexai-2.0b0.dist-info/RECORD,,
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: poetry-core 2.1.1
+ Generator: hatchling 1.27.0
  Root-Is-Purelib: true
  Tag: py3-none-any
@@ -0,0 +1,2 @@
+ [opentelemetry_instrumentor]
+ vertexai = opentelemetry.instrumentation.vertexai:VertexAIInstrumentor
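
The `opentelemetry_instrumentor` entry point above is how zero-code/auto instrumentation discovers this package. A sketch of that discovery using only the standard library (assumes the Python 3.10+ `entry_points` selection API):

    from importlib.metadata import entry_points

    # Auto-instrumentation iterates this group; each entry point loads an instrumentor class.
    (ep,) = entry_points(group="opentelemetry_instrumentor", name="vertexai")
    VertexAIInstrumentor = ep.load()
    VertexAIInstrumentor().instrument()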