opentelemetry-instrumentation-vertexai 0.12.4__tar.gz → 0.49.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,20 +1,25 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.4
  Name: opentelemetry-instrumentation-vertexai
- Version: 0.12.4
+ Version: 0.49.6
  Summary: OpenTelemetry Vertex AI instrumentation
  License: Apache-2.0
  Author: Gal Kleinman
  Author-email: gal@traceloop.com
- Requires-Python: >=3.8.1,<4
+ Requires-Python: >=3.9,<4
  Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
- Requires-Dist: opentelemetry-api (>=1.22.0,<2.0.0)
- Requires-Dist: opentelemetry-instrumentation (==0.43b0)
- Requires-Dist: opentelemetry-semantic-conventions-ai (>=0.0.20,<0.0.21)
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Programming Language :: Python :: 3.14
+ Provides-Extra: instruments
+ Requires-Dist: opentelemetry-api (>=1.38.0,<2.0.0)
+ Requires-Dist: opentelemetry-instrumentation (>=0.59b0)
+ Requires-Dist: opentelemetry-semantic-conventions (>=0.59b0)
+ Requires-Dist: opentelemetry-semantic-conventions-ai (>=0.4.13,<0.5.0)
+ Project-URL: Repository, https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-vertexai
  Description-Content-Type: text/markdown

  # OpenTelemetry VertexAI Instrumentation
@@ -31,6 +36,14 @@ This library allows tracing VertexAI prompts and completions sent with the offic
  pip install opentelemetry-instrumentation-vertexai
  ```

+ ## Example usage
+
+ ```python
+ from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor
+
+ VertexAIInstrumentor().instrument()
+ ```
+
  ## Privacy

  **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
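The usage example added to the package description assumes a tracer provider is already configured. For context, a minimal end-to-end setup (a sketch using the standard OpenTelemetry SDK console exporter, not taken from the package docs) looks like this:

```python
# Sketch only: wire a console exporter before enabling the instrumentation.
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter

from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor

provider = TracerProvider()
provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

# After this call, Vertex AI SDK calls (e.g. GenerativeModel.generate_content)
# produce CLIENT spans such as "vertexai.generate_content".
VertexAIInstrumentor().instrument()
```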
@@ -12,6 +12,14 @@ This library allows tracing VertexAI prompts and completions sent with the offic
  pip install opentelemetry-instrumentation-vertexai
  ```

+ ## Example usage
+
+ ```python
+ from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor
+
+ VertexAIInstrumentor().instrument()
+ ```
+
  ## Privacy

  **By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
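The `should_send_prompts` gate used by the instrumentation code below decides whether message content is captured. In the openllmetry packages this is conventionally controlled by the `TRACELOOP_TRACE_CONTENT` environment variable (an assumption here, since `utils.py` is not included in this diff):

```python
import os

# Assumption: openllmetry's should_send_prompts() checks this variable; set it
# before instrumenting to keep prompt/completion text out of telemetry.
os.environ["TRACELOOP_TRACE_CONTENT"] = "false"
```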
@@ -0,0 +1,369 @@
+ """OpenTelemetry Vertex AI instrumentation"""
+
+ import logging
+ import types
+ from typing import Collection
+
+ from opentelemetry import context as context_api
+ from opentelemetry._logs import get_logger
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap
+ from opentelemetry.instrumentation.vertexai.config import Config
+ from opentelemetry.instrumentation.vertexai.event_emitter import (
+     emit_prompt_events,
+     emit_response_events,
+ )
+ from opentelemetry.instrumentation.vertexai.span_utils import (
+     set_input_attributes,
+     set_input_attributes_sync,
+     set_model_input_attributes,
+     set_model_response_attributes,
+     set_response_attributes,
+ )
+ from opentelemetry.instrumentation.vertexai.utils import dont_throw, should_emit_events
+ from opentelemetry.instrumentation.vertexai.version import __version__
+ from opentelemetry.semconv._incubating.attributes import (
+     gen_ai_attributes as GenAIAttributes,
+ )
+ from opentelemetry.semconv_ai import (
+     SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
+     LLMRequestTypeValues,
+     SpanAttributes,
+ )
+ from opentelemetry.trace import SpanKind, get_tracer
+ from opentelemetry.trace.status import Status, StatusCode
+ from wrapt import wrap_function_wrapper
+
+ logger = logging.getLogger(__name__)
+
+ _instruments = ("google-cloud-aiplatform >= 1.38.1",)
+
+ WRAPPED_METHODS = [
+     {
+         "package": "vertexai.generative_models",
+         "object": "GenerativeModel",
+         "method": "generate_content",
+         "span_name": "vertexai.generate_content",
+         "is_async": False,
+     },
+     {
+         "package": "vertexai.generative_models",
+         "object": "GenerativeModel",
+         "method": "generate_content_async",
+         "span_name": "vertexai.generate_content_async",
+         "is_async": True,
+     },
+     {
+         "package": "vertexai.generative_models",
+         "object": "ChatSession",
+         "method": "send_message",
+         "span_name": "vertexai.send_message",
+         "is_async": False,
+     },
+     {
+         "package": "vertexai.preview.generative_models",
+         "object": "GenerativeModel",
+         "method": "generate_content",
+         "span_name": "vertexai.generate_content",
+         "is_async": False,
+     },
+     {
+         "package": "vertexai.preview.generative_models",
+         "object": "GenerativeModel",
+         "method": "generate_content_async",
+         "span_name": "vertexai.generate_content_async",
+         "is_async": True,
+     },
+     {
+         "package": "vertexai.preview.generative_models",
+         "object": "ChatSession",
+         "method": "send_message",
+         "span_name": "vertexai.send_message",
+         "is_async": False,
+     },
+     {
+         "package": "vertexai.language_models",
+         "object": "TextGenerationModel",
+         "method": "predict",
+         "span_name": "vertexai.predict",
+         "is_async": False,
+     },
+     {
+         "package": "vertexai.language_models",
+         "object": "TextGenerationModel",
+         "method": "predict_async",
+         "span_name": "vertexai.predict_async",
+         "is_async": True,
+     },
+     {
+         "package": "vertexai.language_models",
+         "object": "TextGenerationModel",
+         "method": "predict_streaming",
+         "span_name": "vertexai.predict_streaming",
+         "is_async": False,
+     },
+     {
+         "package": "vertexai.language_models",
+         "object": "TextGenerationModel",
+         "method": "predict_streaming_async",
+         "span_name": "vertexai.predict_streaming_async",
+         "is_async": True,
+     },
+     {
+         "package": "vertexai.language_models",
+         "object": "ChatSession",
+         "method": "send_message",
+         "span_name": "vertexai.send_message",
+         "is_async": False,
+     },
+     {
+         "package": "vertexai.language_models",
+         "object": "ChatSession",
+         "method": "send_message_streaming",
+         "span_name": "vertexai.send_message_streaming",
+         "is_async": False,
+     },
+ ]
+
+
+ def is_streaming_response(response):
+     return isinstance(response, types.GeneratorType)
+
+
+ def is_async_streaming_response(response):
+     return isinstance(response, types.AsyncGeneratorType)
+
+
+ @dont_throw
+ def handle_streaming_response(span, event_logger, llm_model, response, token_usage):
+     set_model_response_attributes(span, llm_model, token_usage)
+     if should_emit_events():
+         emit_response_events(response, event_logger)
+     else:
+         set_response_attributes(span, llm_model, response)
+     if span.is_recording():
+         span.set_status(Status(StatusCode.OK))
+
+
+ def _build_from_streaming_response(span, event_logger, response, llm_model):
+     complete_response = ""
+     token_usage = None
+     for item in response:
+         item_to_yield = item
+         complete_response += str(item.text)
+         if item.usage_metadata:
+             token_usage = item.usage_metadata
+
+         yield item_to_yield
+
+     handle_streaming_response(
+         span, event_logger, llm_model, complete_response, token_usage
+     )
+
+     span.set_status(Status(StatusCode.OK))
+     span.end()
+
+
+ async def _abuild_from_streaming_response(span, event_logger, response, llm_model):
+     complete_response = ""
+     token_usage = None
+     async for item in response:
+         item_to_yield = item
+         complete_response += str(item.text)
+         if item.usage_metadata:
+             token_usage = item.usage_metadata
+
+         yield item_to_yield
+
+     # Report the accumulated text (not the exhausted stream), mirroring the
+     # sync builder above.
+     handle_streaming_response(
+         span, event_logger, llm_model, complete_response, token_usage
+     )
+
+     span.set_status(Status(StatusCode.OK))
+     span.end()
+
+
+ @dont_throw
+ async def _handle_request(span, event_logger, args, kwargs, llm_model):
+     set_model_input_attributes(span, kwargs, llm_model)
+     if should_emit_events():
+         emit_prompt_events(args, event_logger)
+     else:
+         await set_input_attributes(span, args)
+
+
+ def _handle_response(span, event_logger, response, llm_model):
+     set_model_response_attributes(span, llm_model, response.usage_metadata)
+     if should_emit_events():
+         emit_response_events(response, event_logger)
+     else:
+         set_response_attributes(
+             span, llm_model, response.candidates[0].text if response.candidates else ""
+         )
+     if span.is_recording():
+         span.set_status(Status(StatusCode.OK))
+
+
+ def _with_tracer_wrapper(func):
+     """Helper for providing tracer for wrapper functions."""
+
+     def _with_tracer(tracer, event_logger, to_wrap):
+         def wrapper(wrapped, instance, args, kwargs):
+             return func(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs)
+
+         return wrapper
+
+     return _with_tracer
+
+
+ @_with_tracer_wrapper
+ async def _awrap(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs):
+     """Instruments and calls every function defined in TO_WRAP."""
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+         SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+     ):
+         return await wrapped(*args, **kwargs)
+
+     llm_model = "unknown"
+     if hasattr(instance, "_model_id"):
+         llm_model = instance._model_id
+     if hasattr(instance, "_model_name"):
+         llm_model = instance._model_name.replace("publishers/google/models/", "")
+     # For ChatSession, try to get model from the parent model object
+     if hasattr(instance, "_model") and hasattr(instance._model, "_model_name"):
+         llm_model = instance._model._model_name.replace("publishers/google/models/", "")
+     elif hasattr(instance, "_model") and hasattr(instance._model, "_model_id"):
+         llm_model = instance._model._model_id
+
+     name = to_wrap.get("span_name")
+     span = tracer.start_span(
+         name,
+         kind=SpanKind.CLIENT,
+         attributes={
+             GenAIAttributes.GEN_AI_SYSTEM: "Google",
+             SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
+         },
+     )
+
+     await _handle_request(span, event_logger, args, kwargs, llm_model)
+
+     response = await wrapped(*args, **kwargs)
+
+     if response:
+         if is_streaming_response(response):
+             return _build_from_streaming_response(
+                 span, event_logger, response, llm_model
+             )
+         elif is_async_streaming_response(response):
+             return _abuild_from_streaming_response(
+                 span, event_logger, response, llm_model
+             )
+         else:
+             _handle_response(span, event_logger, response, llm_model)
+
+     span.end()
+     return response
+
+
+ @_with_tracer_wrapper
+ def _wrap(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs):
+     """Instruments and calls every function defined in TO_WRAP."""
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+         SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+     ):
+         return wrapped(*args, **kwargs)
+
+     llm_model = "unknown"
+     if hasattr(instance, "_model_id"):
+         llm_model = instance._model_id
+     if hasattr(instance, "_model_name"):
+         llm_model = instance._model_name.replace("publishers/google/models/", "")
+     # For ChatSession, try to get model from the parent model object
+     if hasattr(instance, "_model") and hasattr(instance._model, "_model_name"):
+         llm_model = instance._model._model_name.replace("publishers/google/models/", "")
+     elif hasattr(instance, "_model") and hasattr(instance._model, "_model_id"):
+         llm_model = instance._model._model_id
+
+     name = to_wrap.get("span_name")
+     span = tracer.start_span(
+         name,
+         kind=SpanKind.CLIENT,
+         attributes={
+             GenAIAttributes.GEN_AI_SYSTEM: "Google",
+             SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
+         },
+     )
+
+     # Use sync version for non-async wrapper to avoid image processing for now
+     set_model_input_attributes(span, kwargs, llm_model)
+     if should_emit_events():
+         emit_prompt_events(args, event_logger)
+     else:
+         set_input_attributes_sync(span, args)
+
+     response = wrapped(*args, **kwargs)
+
+     if response:
+         if is_streaming_response(response):
+             return _build_from_streaming_response(
+                 span, event_logger, response, llm_model
+             )
+         elif is_async_streaming_response(response):
+             return _abuild_from_streaming_response(
+                 span, event_logger, response, llm_model
+             )
+         else:
+             _handle_response(span, event_logger, response, llm_model)
+
+     span.end()
+     return response
+
+
+ class VertexAIInstrumentor(BaseInstrumentor):
+     """An instrumentor for VertexAI's client library."""
+
+     def __init__(self, exception_logger=None, use_legacy_attributes=True, upload_base64_image=None):
+         super().__init__()
+         Config.exception_logger = exception_logger
+         Config.use_legacy_attributes = use_legacy_attributes
+         if upload_base64_image:
+             Config.upload_base64_image = upload_base64_image
+
+     def instrumentation_dependencies(self) -> Collection[str]:
+         return _instruments
+
+     def _instrument(self, **kwargs):
+         tracer_provider = kwargs.get("tracer_provider")
+         tracer = get_tracer(__name__, __version__, tracer_provider)
+
+         event_logger = None
+
+         if should_emit_events():
+             logger_provider = kwargs.get("logger_provider")
+             event_logger = get_logger(
+                 __name__,
+                 __version__,
+                 logger_provider=logger_provider,
+             )
+
+         for wrapped_method in WRAPPED_METHODS:
+             wrap_package = wrapped_method.get("package")
+             wrap_object = wrapped_method.get("object")
+             wrap_method = wrapped_method.get("method")
+
+             wrap_function_wrapper(
+                 wrap_package,
+                 f"{wrap_object}.{wrap_method}",
+                 (
+                     _awrap(tracer, event_logger, wrapped_method)
+                     if wrapped_method.get("is_async")
+                     else _wrap(tracer, event_logger, wrapped_method)
+                 ),
+             )
+
+     def _uninstrument(self, **kwargs):
+         for wrapped_method in WRAPPED_METHODS:
+             wrap_package = wrapped_method.get("package")
+             wrap_object = wrapped_method.get("object")
+             unwrap(
+                 f"{wrap_package}.{wrap_object}",
+                 wrapped_method.get("method", ""),
+             )
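`_instrument` attaches one wrapt wrapper per `WRAPPED_METHODS` entry. Reduced to a self-contained sketch (the `Client` class and `print` calls below are stand-ins for the real span lifecycle, not package code), the mechanism looks like this:

```python
# Standalone illustration of the wrap_function_wrapper pattern used above.
from wrapt import wrap_function_wrapper


class Client:
    def send(self, text):
        return text.upper()


def traced_send(wrapped, instance, args, kwargs):
    print("span start: demo.send")  # tracer.start_span(...) in the real wrapper
    try:
        return wrapped(*args, **kwargs)
    finally:
        print("span end: demo.send")  # span.end() in the real wrapper


# Patch Client.send in place, exactly as _instrument patches the Vertex AI SDK.
wrap_function_wrapper(__name__, "Client.send", traced_send)
print(Client().send("hi"))  # span markers printed around "HI"
```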
@@ -0,0 +1,9 @@
+ from typing import Callable
+
+
+ class Config:
+     exception_logger = None
+     use_legacy_attributes = True
+     upload_base64_image: Callable[[str, str, str, str], str] = (
+         lambda trace_id, span_id, image_name, base64_string: base64_string
+     )
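`Config.upload_base64_image` is a hook that receives `(trace_id, span_id, image_name, base64_string)` and returns a string URL for the stored image; the default above is a no-op pass-through. A hypothetical callback (the local-file store below is illustrative only) can be supplied through the instrumentor's constructor:

```python
import base64

from opentelemetry.instrumentation.vertexai import VertexAIInstrumentor


def upload_to_store(trace_id: str, span_id: str, image_name: str, base64_string: str) -> str:
    # Hypothetical: persist the decoded image and return a retrievable URL.
    path = f"/tmp/{trace_id}-{span_id}-{image_name}"
    with open(path, "wb") as f:
        f.write(base64.b64decode(base64_string))
    return f"file://{path}"


VertexAIInstrumentor(upload_base64_image=upload_to_store).instrument()
```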
@@ -0,0 +1,173 @@
+ from dataclasses import asdict
+ from enum import Enum
+ from typing import Union
+
+ from opentelemetry._logs import LogRecord
+ from opentelemetry.instrumentation.vertexai.event_models import (
+     ChoiceEvent,
+     MessageEvent,
+ )
+ from opentelemetry.instrumentation.vertexai.utils import (
+     dont_throw,
+     should_emit_events,
+     should_send_prompts,
+ )
+ from opentelemetry.semconv._incubating.attributes import (
+     gen_ai_attributes as GenAIAttributes,
+ )
+
+ from vertexai.generative_models import GenerationResponse
+
+
+ class Roles(Enum):
+     USER = "user"
+     ASSISTANT = "assistant"
+     SYSTEM = "system"
+     TOOL = "tool"
+
+
+ VALID_MESSAGE_ROLES = {role.value for role in Roles}
+ """The valid roles for naming the message event."""
+
+ EVENT_ATTRIBUTES = {
+     GenAIAttributes.GEN_AI_SYSTEM: GenAIAttributes.GenAiSystemValues.VERTEX_AI.value
+ }
+ """The attributes to be used for the event."""
+
+
+ def _parse_vertex_finish_reason(reason):
+     if reason is None:
+         return "unknown"
+
+     finish_reason_map = {
+         0: "unspecified",
+         1: "stop",
+         2: "max_tokens",
+         3: "safety",
+         4: "recitation",
+         5: "other",
+         6: "blocklist",
+         7: "prohibited_content",
+         8: "spii",
+         9: "malformed_function_call",
+     }
+
+     if hasattr(reason, "value"):
+         reason_value = reason.value
+     else:
+         reason_value = reason
+
+     return finish_reason_map.get(reason_value, "unknown")
+
+
+ @dont_throw
+ def emit_prompt_events(args, event_logger):
+     prompt = ""
+     if args is not None and len(args) > 0:
+         for arg in args:
+             if isinstance(arg, str):
+                 prompt = f"{prompt}{arg}\n"
+             elif isinstance(arg, list):
+                 for subarg in arg:
+                     prompt = f"{prompt}{subarg}\n"
+     emit_event(MessageEvent(content=prompt, role=Roles.USER.value), event_logger)
+
+
+ def emit_response_events(response, event_logger):
+     if isinstance(response, str):
+         emit_event(
+             ChoiceEvent(
+                 index=0,
+                 message={"content": response, "role": Roles.ASSISTANT.value},
+                 finish_reason="unknown",
+             ),
+             event_logger,
+         )
+     elif isinstance(response, GenerationResponse):
+         for candidate in response.candidates:
+             emit_event(
+                 ChoiceEvent(
+                     index=candidate.index,
+                     message={
+                         "content": candidate.text,
+                         "role": Roles.ASSISTANT.value,
+                     },
+                     finish_reason=_parse_vertex_finish_reason(candidate.finish_reason),
+                 ),
+                 event_logger,
+             )
+
+
+ def emit_event(event: Union[MessageEvent, ChoiceEvent], event_logger) -> None:
+     """
+     Emit an event to the OpenTelemetry SDK.
+
+     Args:
+         event: The event to emit.
+     """
+     if not should_emit_events() or event_logger is None:
+         return
+
+     if isinstance(event, MessageEvent):
+         _emit_message_event(event, event_logger)
+     elif isinstance(event, ChoiceEvent):
+         _emit_choice_event(event, event_logger)
+     else:
+         raise TypeError("Unsupported event type")
+
+
+ def _emit_message_event(event: MessageEvent, event_logger) -> None:
+     body = asdict(event)
+
+     if event.role in VALID_MESSAGE_ROLES:
+         name = "gen_ai.{}.message".format(event.role)
+         # According to the semantic conventions, the role is conditionally required if available
+         # and not equal to the "role" in the message name. So, remove the role from the body if
+         # it is the same as the one in the event name.
+         body.pop("role", None)
+     else:
+         name = "gen_ai.user.message"
+
+     # According to the semantic conventions, only the assistant role has tool calls
+     if event.role != Roles.ASSISTANT.value and event.tool_calls is not None:
+         del body["tool_calls"]
+     elif event.tool_calls is None:
+         del body["tool_calls"]
+
+     if not should_send_prompts():
+         del body["content"]
+         if body.get("tool_calls") is not None:
+             for tool_call in body["tool_calls"]:
+                 tool_call["function"].pop("arguments", None)
+
+     log_record = LogRecord(
+         body=body,
+         attributes=EVENT_ATTRIBUTES,
+         event_name=name,
+     )
+     event_logger.emit(log_record)
+
+
+ def _emit_choice_event(event: ChoiceEvent, event_logger) -> None:
+     body = asdict(event)
+     if event.message["role"] == Roles.ASSISTANT.value:
+         # According to the semantic conventions, the role is conditionally required if available
+         # and not equal to "assistant", so remove the role from the body if it is "assistant".
+         body["message"].pop("role", None)
+
+     if event.tool_calls is None:
+         del body["tool_calls"]
+
+     if not should_send_prompts():
+         body["message"].pop("content", None)
+         if body.get("tool_calls") is not None:
+             for tool_call in body["tool_calls"]:
+                 tool_call["function"].pop("arguments", None)
+
+     log_record = LogRecord(
+         body=body,
+         attributes=EVENT_ATTRIBUTES,
+         event_name="gen_ai.choice",
+     )
+     event_logger.emit(log_record)
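To see the record body `_emit_choice_event` starts from, construct a `ChoiceEvent` (defined in `event_models.py` below) and dump it with `asdict`; the values here are illustrative:

```python
from dataclasses import asdict

from opentelemetry.instrumentation.vertexai.event_models import ChoiceEvent

event = ChoiceEvent(
    index=0,
    message={"content": "Hello!", "role": "assistant"},
    finish_reason="stop",
)
print(asdict(event))
# {'index': 0, 'message': {'content': 'Hello!', 'role': 'assistant'},
#  'finish_reason': 'stop', 'tool_calls': None}
# _emit_choice_event then drops the "assistant" role and the empty tool_calls
# before emitting the body as a "gen_ai.choice" log record.
```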
@@ -0,0 +1,41 @@
+ from dataclasses import dataclass
+ from typing import Any, List, Literal, Optional, TypedDict
+
+
+ class _FunctionToolCall(TypedDict):
+     function_name: str
+     arguments: Optional[dict[str, Any]]
+
+
+ class ToolCall(TypedDict):
+     """Represents a tool call in the AI model."""
+
+     id: str
+     function: _FunctionToolCall
+     type: Literal["function"]
+
+
+ class CompletionMessage(TypedDict):
19
+ """Represents a message in the AI model."""
20
+
21
+ content: Any
22
+ role: str = "assistant"
23
+
24
+
25
+ @dataclass
26
+ class MessageEvent:
27
+ """Represents an input event for the AI model."""
28
+
29
+ content: Any
30
+ role: str = "user"
31
+ tool_calls: Optional[List[ToolCall]] = None
32
+
33
+
34
+ @dataclass
35
+ class ChoiceEvent:
36
+ """Represents a completion event for the AI model."""
37
+
38
+ index: int
39
+ message: CompletionMessage
40
+ finish_reason: str = "unknown"
41
+ tool_calls: Optional[List[ToolCall]] = None
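The prompt side is symmetric: `emit_prompt_events` in `event_emitter.py` builds a `MessageEvent` roughly like the sketch below (illustrative content):

```python
from opentelemetry.instrumentation.vertexai.event_models import MessageEvent

# role defaults to "user", so emit_event routes this to "gen_ai.user.message".
event = MessageEvent(content="What is OpenTelemetry?\n")
```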